1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
 160 // for Java use float registers v0-v15 are always save on call whereas
 161 // the platform ABI treats v8-v15 as callee save). float registers
 162 // v16-v31 are SOC as per the platform spec
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
 328 // the AArch64 CSPR status flag register is not directly acessible as
 329 // instruction operand. the FPSR status flag register is a system
 330 // register which can be written/read using MSR/MRS but again does not
 331 // appear as an operand (a code identifying the FSPR occurs as an
 332 // immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// n.b. same two-slot (V<n>, V<n>_H) layout as double_reg above.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// n.b. each register contributes four 32-bit slots
// (V<n>, V<n>_H, V<n>_J, V<n>_K) = 128 bits.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots are listed here, unlike
// vectorx_reg which also lists _J/_K -- confirm this is intentional.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only the V1/V1_H slots are listed here, unlike
// vectorx_reg which also lists _J/_K -- confirm this is intentional.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only the V2/V2_H slots are listed here, unlike
// vectorx_reg which also lists _J/_K -- confirm this is intentional.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only the V3/V3_H slots are listed here, unlike
// vectorx_reg which also lists _J/_K -- confirm this is intentional.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (holds only the RFLAGS register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls cost twice a register op
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references are by far the most expensive ops in this model
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------
  // AArch64 does not use call trampoline stubs, so both queries
  // below report zero.

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
class HandlerImpl {

 public:

  // emit the exception/deopt handler stubs; implementations live in
  // the generated ad_<arch>.cpp (declared here for the generic code)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
  // Declarations of the volatile-access predicates; the definitions
  // (and an extended commentary on the volatile translation scheme)
  // are in the source block below.

  // return true for opcodes handled by the CAS translation rules
  bool is_CAS(int opcode);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
  // Optimization of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
1134   // sequences which i) occur as a translation of a volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
1231   // plant at a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
1262   // is_CAS(int opcode)
1263   //
1264   // return true if opcode is one of the possible CompareAndSwapX
1265   // values otherwise false.
1266 
1267   bool is_CAS(int opcode)
1268   {
1269     switch(opcode) {
1270       // We handle these
1271     case Op_CompareAndSwapI:
1272     case Op_CompareAndSwapL:
1273     case Op_CompareAndSwapP:
1274     case Op_CompareAndSwapN:
1275  // case Op_CompareAndSwapB:
1276  // case Op_CompareAndSwapS:
1277       return true;
1278       // These are TBD
1279     case Op_WeakCompareAndSwapB:
1280     case Op_WeakCompareAndSwapS:
1281     case Op_WeakCompareAndSwapI:
1282     case Op_WeakCompareAndSwapL:
1283     case Op_WeakCompareAndSwapP:
1284     case Op_WeakCompareAndSwapN:
1285     case Op_CompareAndExchangeB:
1286     case Op_CompareAndExchangeS:
1287     case Op_CompareAndExchangeI:
1288     case Op_CompareAndExchangeL:
1289     case Op_CompareAndExchangeP:
1290     case Op_CompareAndExchangeN:
1291       return false;
1292     default:
1293       return false;
1294     }
1295   }
1296 
1297   // helper to determine the maximum number of Phi nodes we may need to
1298   // traverse when searching from a card mark membar for the merge mem
1299   // feeding a trailing membar or vice versa
1300 
1301 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1302 
// Return true when the acquire membar can be elided because the
// preceding load (or CAS) will itself be emitted in acquiring form
// (ldar<x>/ldaxr<x>), making the dmb redundant.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // trailing membar of a volatile read: the load carries the acquire
  if (mb->trailing_load()) {
    return true;
  }

  // trailing membar of a CAS: elide only for opcodes is_CAS accepts
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode());
  }

  // any other membar still needs its dmb
  return false;
}
1326 
1327 bool needs_acquiring_load(const Node *n)
1328 {
1329   assert(n->is_Load(), "expecting a load");
1330   if (UseBarriersForVolatile) {
1331     // we use a normal load and a dmb
1332     return false;
1333   }
1334 
1335   LoadNode *ld = n->as_Load();
1336 
1337   return ld->is_acquire();
1338 }
1339 
1340 bool unnecessary_release(const Node *n)
1341 {
1342   assert((n->is_MemBar() &&
1343           n->Opcode() == Op_MemBarRelease),
1344          "expecting a release membar");
1345 
1346   if (UseBarriersForVolatile) {
1347     // we need to plant a dmb
1348     return false;
1349   }
1350 
1351   MemBarNode *barrier = n->as_MemBar();
1352   if (!barrier->leading()) {
1353     return false;
1354   } else {
1355     Node* trailing = barrier->trailing_membar();
1356     MemBarNode* trailing_mb = trailing->as_MemBar();
1357     assert(trailing_mb->trailing(), "Not a trailing membar?");
1358     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1359 
1360     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1361     if (mem->is_Store()) {
1362       assert(mem->as_Store()->is_release(), "");
1363       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1364       return true;
1365     } else {
1366       assert(mem->is_LoadStore(), "");
1367       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1368       return is_CAS(mem->Opcode());
1369     }
1370   }
1371   return false;
1372 }
1373 
// Return true when a MemBarVolatile trailing a volatile store can be
// elided because the store will be emitted in releasing form (stlr<x>).
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // elide only when this is the trailing membar of a releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // sanity check the leading/trailing membar pairing
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1397 
1398 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1399 
1400 bool needs_releasing_store(const Node *n)
1401 {
1402   // assert n->is_Store();
1403   if (UseBarriersForVolatile) {
1404     // we use a normal store and dmb combination
1405     return false;
1406   }
1407 
1408   StoreNode *st = n->as_Store();
1409 
1410   return st->trailing_membar() != NULL;
1411 }
1412 
1413 // predicate controlling translation of CAS
1414 //
1415 // returns true if CAS needs to use an acquiring load otherwise false
1416 
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // the dmb sequence is used instead of the ldaxr form
    return false;
  }

  // CAS nodes only reach here with a trailing membar (verified below),
  // in which case the acquiring (ldaxr) macro sequence is always wanted
  LoadStoreNode* ldst = n->as_LoadStore();
  assert(ldst->trailing_membar() != NULL, "expected trailing membar");

  // so we can just return true here
  return true;
}
1430 
1431 // predicate controlling translation of StoreCM
1432 //
1433 // returns true if a StoreStore must precede the card write otherwise
1434 // false
1435 
// Return true when no dmb ishst is required before the card write;
// false only for CMS without conditional card marking.
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we need to generate a dmb ishst between an object put and the
  // associated card mark when we are using CMS without conditional
  // card marking

  if (UseConcMarkSweepGC && !UseCondCardMark) {
    return false;
  }

  // a storestore is unnecessary in all other cases

  return true;
}
1452 
1453 
1454 #define __ _masm.
1455 
1456 // advance declarations for helper functions to convert register
1457 // indices to register objects
1458 
1459 // the ad file has to provide implementations of certain methods
1460 // expected by the generic code
1461 //
1462 // REQUIRED FUNCTIONALITY
1463 
1464 //=============================================================================
1465 
1466 // !!!!! Special hack to get all types of calls to specify the byte offset
1467 //       from the start of the call to the point where the return address
1468 //       will point.
1469 
1470 int MachCallStaticJavaNode::ret_addr_offset()
1471 {
1472   // call should be a simple bl
1473   int off = 4;
1474   return off;
1475 }
1476 
1477 int MachCallDynamicJavaNode::ret_addr_offset()
1478 {
1479   return 16; // movz, movk, movk, bl
1480 }
1481 
1482 int MachCallRuntimeNode::ret_addr_offset() {
1483   // for generated stubs the call will be
1484   //   far_call(addr)
1485   // for real runtime callouts it will be six instructions
1486   // see aarch64_enc_java_to_runtime
1487   //   adr(rscratch2, retaddr)
1488   //   lea(rscratch1, RuntimeAddress(addr)
1489   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1490   //   blrt rscratch1
1491   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1492   if (cb) {
1493     return MacroAssembler::far_branch_size();
1494   } else {
1495     return 6 * NativeInstruction::instruction_size;
1496   }
1497 }
1498 
1499 // Indicate if the safepoint node needs the polling page as an input
1500 
1501 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1503 // instruction itself. so we cannot plant a mov of the safepoint poll
1504 // address followed by a load. setting this to true means the mov is
1505 // scheduled as a prior instruction. that's better for scheduling
1506 // anyway.
1507 
1508 bool SafePointNode::needs_polling_address_input()
1509 {
1510   return true;
1511 }
1512 
1513 //=============================================================================
1514 
1515 #ifndef PRODUCT
// Debug listing for the breakpoint node (emitted as brk 0, see emit below).
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
1519 #endif
1520 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // plant a brk instruction with immediate 0
  __ brk(0);
}
1525 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // let the generic code measure the emitted instructions
  return MachNode::size(ra_);
}
1529 
1530 //=============================================================================
1531 
1532 #ifndef PRODUCT
  // Debug listing for a run of _count padding nops.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1536 #endif
1537 
1538   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1539     MacroAssembler _masm(&cbuf);
1540     for (int i = 0; i < _count; i++) {
1541       __ nop();
1542     }
1543   }
1544 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // each nop is one fixed-width instruction
    return _count * NativeInstruction::instruction_size;
  }
1548 
1549 //=============================================================================
// the constant base node produces no value needing a register (its
// encoding below is empty), so its output mask is empty
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1551 
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
1555 
// no post-register-allocation expansion needed on this platform
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// never called since requires_postalloc_expand() returns false
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
1560 
// the constant base is not materialised by any code on this platform
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
1564 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // nothing is emitted (see the empty encoding above)
  return 0;
}
1568 
1569 #ifndef PRODUCT
// Debug listing showing that the node emits nothing.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1573 #endif
1574 
1575 #ifndef PRODUCT
// Debug listing of the prolog.
// NOTE(review): assumed to mirror the code laid down by
// MachPrologNode::emit / build_frame -- confirm against
// MacroAssembler::build_frame when changing either.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames: the whole frame size fits a 9-bit immediate
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frames: materialise the frame size in rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
1595 #endif
1596 
// Emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification and constant-table setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the frame becomes valid for GC/deopt purposes
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1632 
// The prolog length varies (stack bang, frame shape), so measure it.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1638 
int MachPrologNode::reloc() const
{
  // the prolog contains no relocatable values
  return 0;
}
1643 
1644 //=============================================================================
1645 
1646 #ifndef PRODUCT
// Debug listing of the epilog.
// NOTE(review): assumed to mirror remove_frame / read_polling_page in
// MachEpilogNode::emit -- keep the two in sync.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frames: fits a 9-bit immediate
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frames: materialise the frame size in rscratch1
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
1670 #endif
1671 
// Emit the method epilog: frame teardown, simulator notification,
// reserved-stack check and the return-poll of the safepoint page.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // touch the polling page so a safepoint can trap the return
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1691 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  // (same approach as MachPrologNode::size)
  return MachNode::size(ra_);
}
1696 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
1701 
const Pipeline * MachEpilogNode::pipeline() const {
  // no special scheduling: fall back to the generic pipeline class
  return MachNode::pipeline_class();
}
1705 
// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  // 4 == one instruction; NOTE(review): value appears unused, see above
  return 4;
}
1713 
1714 //=============================================================================
1715 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack. rc_bad marks OptoReg::Bad (no register assigned); see
// rc_class below for the slot-number ranges of each class.
enum RC { rc_bad, rc_int, rc_float, rc_stack };
1719 
// Map an allocator slot number to its register class.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots each
  // (V<n>, V<n>_H, V<n>_J, V<n>_K -- see vectorx_reg above), i.e. 128 slots
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1743 
1744 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1745   Compile* C = ra_->C;
1746 
1747   // Get registers to move.
1748   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1749   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1750   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1751   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1752 
1753   enum RC src_hi_rc = rc_class(src_hi);
1754   enum RC src_lo_rc = rc_class(src_lo);
1755   enum RC dst_hi_rc = rc_class(dst_hi);
1756   enum RC dst_lo_rc = rc_class(dst_lo);
1757 
1758   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1759 
1760   if (src_hi != OptoReg::Bad) {
1761     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1762            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1763            "expected aligned-adjacent pairs");
1764   }
1765 
1766   if (src_lo == dst_lo && src_hi == dst_hi) {
1767     return 0;            // Self copy, no move.
1768   }
1769 
1770   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1771               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1772   int src_offset = ra_->reg2offset(src_lo);
1773   int dst_offset = ra_->reg2offset(dst_lo);
1774 
1775   if (bottom_type()->isa_vect() != NULL) {
1776     uint ireg = ideal_reg();
1777     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1778     if (cbuf) {
1779       MacroAssembler _masm(cbuf);
1780       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1781       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1782         // stack->stack
1783         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1784         if (ireg == Op_VecD) {
1785           __ unspill(rscratch1, true, src_offset);
1786           __ spill(rscratch1, true, dst_offset);
1787         } else {
1788           __ spill_copy128(src_offset, dst_offset);
1789         }
1790       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1791         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1792                ireg == Op_VecD ? __ T8B : __ T16B,
1793                as_FloatRegister(Matcher::_regEncode[src_lo]));
1794       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1795         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1796                        ireg == Op_VecD ? __ D : __ Q,
1797                        ra_->reg2offset(dst_lo));
1798       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1799         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1800                        ireg == Op_VecD ? __ D : __ Q,
1801                        ra_->reg2offset(src_lo));
1802       } else {
1803         ShouldNotReachHere();
1804       }
1805     }
1806   } else if (cbuf) {
1807     MacroAssembler _masm(cbuf);
1808     switch (src_lo_rc) {
1809     case rc_int:
1810       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1811         if (is64) {
1812             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1813                    as_Register(Matcher::_regEncode[src_lo]));
1814         } else {
1815             MacroAssembler _masm(cbuf);
1816             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1817                     as_Register(Matcher::_regEncode[src_lo]));
1818         }
1819       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1820         if (is64) {
1821             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1822                      as_Register(Matcher::_regEncode[src_lo]));
1823         } else {
1824             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1825                      as_Register(Matcher::_regEncode[src_lo]));
1826         }
1827       } else {                    // gpr --> stack spill
1828         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1829         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1830       }
1831       break;
1832     case rc_float:
1833       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1834         if (is64) {
1835             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1836                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1837         } else {
1838             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1839                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1840         }
1841       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1842           if (cbuf) {
1843             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1844                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1845         } else {
1846             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1847                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1848         }
1849       } else {                    // fpr --> stack spill
1850         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1851         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1852                  is64 ? __ D : __ S, dst_offset);
1853       }
1854       break;
1855     case rc_stack:
1856       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1857         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1858       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1859         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1860                    is64 ? __ D : __ S, src_offset);
1861       } else {                    // stack --> stack copy
1862         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1863         __ unspill(rscratch1, is64, src_offset);
1864         __ spill(rscratch1, is64, dst_offset);
1865       }
1866       break;
1867     default:
1868       assert(false, "bad rc_class for spill");
1869       ShouldNotReachHere();
1870     }
1871   }
1872 
1873   if (st) {
1874     st->print("spill ");
1875     if (src_lo_rc == rc_stack) {
1876       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1877     } else {
1878       st->print("%s -> ", Matcher::regName[src_lo]);
1879     }
1880     if (dst_lo_rc == rc_stack) {
1881       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1882     } else {
1883       st->print("%s", Matcher::regName[dst_lo]);
1884     }
1885     if (bottom_type()->isa_vect() != NULL) {
1886       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1887     } else {
1888       st->print("\t# spill size = %d", is64 ? 64:32);
1889     }
1890   }
1891 
1892   return 0;
1893 
1894 }
1895 
1896 #ifndef PRODUCT
1897 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1898   if (!ra_)
1899     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1900   else
1901     implementation(NULL, ra_, false, st);
1902 }
1903 #endif
1904 
// Emit the spill copy into the code buffer (no textual output).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1908 
// Size in bytes of the emitted copy; computed by the generic MachNode
// sizing machinery rather than a hand-maintained constant.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1912 
1913 //=============================================================================
1914 
1915 #ifndef PRODUCT
1916 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1917   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1918   int reg = ra_->get_reg_first(this);
1919   st->print("add %s, rsp, #%d]\t# box lock",
1920             Matcher::regName[reg], offset);
1921 }
1922 #endif
1923 
1924 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1925   MacroAssembler _masm(&cbuf);
1926 
1927   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1928   int reg    = ra_->get_encode(this);
1929 
1930   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
1931     __ add(as_Register(reg), sp, offset);
1932   } else {
1933     ShouldNotReachHere();
1934   }
1935 }
1936 
// A box lock is always a single 4 byte add instruction (see emit()).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
1941 
1942 //=============================================================================
1943 
1944 #ifndef PRODUCT
1945 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1946 {
1947   st->print_cr("# MachUEPNode");
1948   if (UseCompressedClassPointers) {
1949     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1950     if (Universe::narrow_klass_shift() != 0) {
1951       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
1952     }
1953   } else {
1954    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1955   }
1956   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
1957   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
1958 }
1959 #endif
1960 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Inline cache check: compare the receiver's klass (loaded from
  // j_rarg0) against the expected klass; rscratch1/rscratch2 are temps
  // (exact operand roles per MacroAssembler::cmp_klass — confirm there).
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Mismatch: tail-jump to the IC miss stub, which resolves the call.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1974 
// Size of the unverified entry point code, via generic MachNode sizing.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1979 
1980 // REQUIRED EMIT CODE
1981 
1982 //=============================================================================
1983 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache exhausted).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Jump (possibly via rscratch1 for far targets) to the shared exception blob.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2003 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache exhausted).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Load lr with the address of this handler so the unpack blob can
  // identify the deopt site (presumably the standard deopt protocol —
  // confirm against SharedRuntime::deopt_blob()).
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2024 
2025 // REQUIRED MATCHER CODE
2026 
2027 //=============================================================================
2028 
2029 const bool Matcher::match_rule_supported(int opcode) {
2030 
2031   switch (opcode) {
2032   default:
2033     break;
2034   }
2035 
2036   if (!has_match_rule(opcode)) {
2037     return false;
2038   }
2039 
2040   return true;  // Per default match rules are supported.
2041 }
2042 
2043 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2044 
2045   // TODO
2046   // identify extra cases that we might want to provide match rules for
2047   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2048   bool ret_value = match_rule_supported(opcode);
2049   // Add rules here.
2050 
2051   return ret_value;  // Per default match rules are supported.
2052 }
2053 
// Predicated (masked) vector operations are not supported.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// No platform-specific adjustment of the float register pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2061 
// x87-style FPU stack offsets do not exist on AArch64; calling this is a bug.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2067 
2068 // Is this branch offset short enough that a short branch can be used?
2069 //
2070 // NOTE: If the platform does not provide any short branch variants, then
2071 //       this method should return false for offset 0.
2072 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2073   // The passed offset is relative to address of the branch.
2074 
2075   return (-32768 <= offset && offset < 32768);
2076 }
2077 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2088 
2089 // Vector width in bytes.
2090 const int Matcher::vector_width_in_bytes(BasicType bt) {
2091   int size = MIN2(16,(int)MaxVectorSize);
2092   // Minimum 2 values in vector
2093   if (size < 2*type2aelembytes(bt)) size = 0;
2094   // But never < 4
2095   if (size < 4) size = 0;
2096   return size;
2097 }
2098 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = usable width / element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2103 const int Matcher::min_vector_size(const BasicType bt) {
2104 //  For the moment limit the vector size to 8 bytes
2105     int size = 8 / type2aelembytes(bt);
2106     if (size < 2) size = 2;
2107     return size;
2108 }
2109 
2110 // Vector ideal reg.
2111 const uint Matcher::vector_ideal_reg(int len) {
2112   switch(len) {
2113     case  8: return Op_VecD;
2114     case 16: return Op_VecX;
2115   }
2116   ShouldNotReachHere();
2117   return 0;
2118 }
2119 
// Vector shift counts always live in a full 128 bit vector register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2128 
// Misaligned vector loads/stores are permitted unless the AlignVector
// flag forces alignment.  (The "x86" wording of the old comment was
// copied from another port.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2133 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false: the hardware only uses the low bits of the count.
const bool Matcher::need_masked_shift_count = false;
2154 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only fold the decode into the address when there is no shift (zero-based,
  // unscaled compressed oops).
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2184 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Implicit-null-check fixup is not used on AArch64 (the "No-op on
// amd64" wording of the old comment was copied from the x86 port);
// reaching this is a bug.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2202 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
// No: floats are kept at 32 bit width on the stack.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2216 
2217 // Return whether or not this register is ever used as an argument.
2218 // This function is used on startup to build the trampoline stubs in
2219 // generateOptoStub.  Registers not mentioned will be killed by the VM
2220 // call in the trampoline, and arguments in those registers not be
2221 // available to the callee.
2222 bool Matcher::can_be_java_arg(int reg)
2223 {
2224   return
2225     reg ==  R0_num || reg == R0_H_num ||
2226     reg ==  R1_num || reg == R1_H_num ||
2227     reg ==  R2_num || reg == R2_H_num ||
2228     reg ==  R3_num || reg == R3_H_num ||
2229     reg ==  R4_num || reg == R4_H_num ||
2230     reg ==  R5_num || reg == R5_H_num ||
2231     reg ==  R6_num || reg == R6_H_num ||
2232     reg ==  R7_num || reg == R7_H_num ||
2233     reg ==  V0_num || reg == V0_H_num ||
2234     reg ==  V1_num || reg == V1_H_num ||
2235     reg ==  V2_num || reg == V2_H_num ||
2236     reg ==  V3_num || reg == V3_H_num ||
2237     reg ==  V4_num || reg == V4_H_num ||
2238     reg ==  V5_num || reg == V5_H_num ||
2239     reg ==  V6_num || reg == V6_H_num ||
2240     reg ==  V7_num || reg == V7_H_num;
2241 }
2242 
// Argument registers may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Hardware division is used for long division by constants; never fall
// back to the asm stub.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2251 
// Register for DIVI projection of divmodI.
// These projection masks are only queried when divmodI/divmodL nodes
// are matched; reaching any of them on AArch64 is a bug.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Location preserved across a method handle invoke: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2278 
2279 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2280   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2281     Node* u = addp->fast_out(i);
2282     if (u->is_Mem()) {
2283       int opsize = u->as_Mem()->memory_size();
2284       assert(opsize > 0, "unexpected memory operand size");
2285       if (u->as_Mem()->memory_size() != (1<<shift)) {
2286         return false;
2287       }
2288     }
2289   }
2290   return true;
2291 }
2292 
// ConvI2L nodes do not require an explicit type on AArch64.
const bool Matcher::convi2l_type_required = false;
2294 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Returns true when the AddP was handled here (all of its inputs pushed
// onto the matcher stack), false to let the generic path deal with it.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // First try the simple base+offset shape.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // base + (i2l(x) << con): fold the shift (and optionally the i2l) into
  // the addressing mode, but only if the shifted value has no other uses.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // base + i2l(x): fold the sign extension into the addressing mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2335 
// No platform-specific address reshaping is needed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2338 
2339 // helper for encoding java_to_runtime calls on sim
2340 //
2341 // this is needed to compute the extra arguments required when
2342 // planting a call to the simulator blrt instruction. the TypeFunc
2343 // can be queried to identify the counts for integral, and floating
2344 // arguments and the return type
2345 
2346 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
2347 {
2348   int gps = 0;
2349   int fps = 0;
2350   const TypeTuple *domain = tf->domain();
2351   int max = domain->cnt();
2352   for (int i = TypeFunc::Parms; i < max; i++) {
2353     const Type *t = domain->field_at(i);
2354     switch(t->basic_type()) {
2355     case T_FLOAT:
2356     case T_DOUBLE:
2357       fps++;
2358     default:
2359       gps++;
2360     }
2361   }
2362   gpcnt = gps;
2363   fpcnt = fps;
2364   BasicType rt = tf->return_type();
2365   switch (rt) {
2366   case T_VOID:
2367     rtype = MacroAssembler::ret_type_void;
2368     break;
2369   default:
2370     rtype = MacroAssembler::ret_type_integral;
2371     break;
2372   case T_FLOAT:
2373     rtype = MacroAssembler::ret_type_float;
2374     break;
2375   case T_DOUBLE:
2376     rtype = MacroAssembler::ret_type_double;
2377     break;
2378   }
2379 }
2380 
// Emit a volatile (acquire/release) move; volatile accesses only permit
// the simple base-register addressing mode, which the guarantees enforce.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Pointer-to-member types for the MacroAssembler load/store emitters
// used by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2394 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index came from an int: sign-extend (sxtw) before scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: simple base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2425 
  // Float-register variant of loadStore() above.
  // NOTE(review): unlike the integer variant, this switch does not list
  // INDINDEXI2L/INDINDEXI2LN — presumably those operands never pair with
  // FP accesses; confirm against the operand definitions.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Index came from an int: sign-extend (sxtw) before scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2448 
  // Vector-register variant of loadStore(); T selects the SIMD register
  // width.  Only base+disp and base+scaled-index modes are supported.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2460 
2461 %}
2462 
2463 
2464 
2465 //----------ENCODING BLOCK-----------------------------------------------------
2466 // This block specifies the encoding classes used by the compiler to
2467 // output byte streams.  Encoding classes are parameterized macros
2468 // used by Machine Instruction Nodes in order to generate the bit
2469 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
2473 // which returns its register number when queried.  CONST_INTER causes
2474 // an operand to generate a function which returns the value of the
2475 // constant when queried.  MEMORY_INTER causes an operand to generate
2476 // four functions which return the Base Register, the Index Register,
2477 // the Scale Value, and the Offset Value of the operand when queried.
2478 // COND_INTER causes an operand to generate six functions which return
2479 // the encoding code (ie - encoding bits for the instruction)
2480 // associated with each basic boolean condition for a conditional
2481 // instruction.
2482 //
2483 // Instructions specify two basic values for encoding.  Again, a
2484 // function is available to check if the constant displacement is an
2485 // oop. They use the ins_encode keyword to specify their encoding
2486 // classes (which must be a sequence of enc_class names, and their
2487 // parameters, specified in the encoding block), and they use the
2488 // opcode keyword to specify, in order, their primary, secondary, and
2489 // tertiary opcode.  Only the opcode sections which a particular
2490 // instruction needs for encoding need to be specified.
2491 encode %{
2492   // Build emit functions for each basic byte or larger field in the
2493   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2494   // from C++ code in the enc_class source block.  Emit functions will
2495   // live in the main source block for now.  In future, we can
2496   // generalize this by adding a syntax that specifies the sizes of
2497   // fields in an order, so that the adlc can build the emit functions
2498   // automagically
2499 
  // catch all for unimplemented encodings
  // Emits a runtime "unimplemented" trap rather than silently producing
  // no code.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2505 
2506   // BEGIN Non-volatile memory access
2507 
  // Non-volatile load encodings.  Each delegates to the loadStore()
  // helper, passing $mem->opcode() so the helper can pick the right
  // index-extension mode.  Pairs of enc_classes with the same name are
  // ADL overloads distinguished by their operand signatures (iRegI vs
  // iRegL destination).
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP loads (single / double precision).
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: 32 bit (S), 64 bit (D) and 128 bit (Q) SIMD variants.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2609 
  // --- Non-volatile store encodings --------------------------------------
  // Mirror images of the load encodings above.  The *0 variants store zero
  // by using the zero register (zr) as the source, avoiding a separate
  // constant materialization.

  // Byte store (MacroAssembler::strb).
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Byte store of zero (source is zr).
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Byte store of zero preceded by a StoreStore barrier, ordering this
  // store after all earlier stores.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Halfword store (MacroAssembler::strh).
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Halfword store of zero.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit word store (MacroAssembler::strw).
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit word store of zero.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit store.  The sp (r31_sp) source is special-cased: sp cannot be
  // used directly as a store data register here, so it is copied through
  // rscratch2 first (asserted to be the "store sp into current thread"
  // pattern only).
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit store of zero.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Single-precision FP store (MacroAssembler::strs).
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Double-precision FP store (MacroAssembler::strd).
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S/D/Q selects the SIMD register width, as for the
  // vector loads above.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2702 
2703   // END Non-volatile memory access
2704 
2705   // volatile loads and stores
2706 
  // --- Volatile (acquire/release) encodings -------------------------------
  // These delegate to the MOV_VOLATILE macro, passing the operand's
  // base/index/scale/disp, a scratch register for address formation, and
  // the acquire/release instruction mnemonic to emit (stlr*/ldar*).

  // Release byte store.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Release halfword store.
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Release word store.
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Acquire byte load, then sign-extend byte->word in the destination.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Acquire byte load, then sign-extend byte->doubleword.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Acquire byte load (no extension beyond what ldarb itself does).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Acquire halfword load, then sign-extend halfword->word.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Acquire halfword load, then sign-extend halfword->doubleword.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Acquire halfword load (unsigned variants).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Acquire word load, int and long destinations.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Acquire doubleword load.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquire FP loads: load through the integer scratch register with
  // ldarw/ldar, then fmov the bits into the FP destination.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2797 
2798   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2799     Register src_reg = as_Register($src$$reg);
2800     // we sometimes get asked to store the stack pointer into the
2801     // current thread -- we cannot do that directly on AArch64
2802     if (src_reg == r31_sp) {
2803         MacroAssembler _masm(&cbuf);
2804       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2805       __ mov(rscratch2, sp);
2806       src_reg = rscratch2;
2807     }
2808     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2809                  rscratch1, stlr);
2810   %}
2811 
  // Release FP stores: fmov the FP bits into rscratch2 first (inside a
  // scoped MacroAssembler so MOV_VOLATILE can create its own), then emit
  // the release store of the integer scratch register.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2831 
2832   // synchronized read/update encodings
2833 
2834   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2835     MacroAssembler _masm(&cbuf);
2836     Register dst_reg = as_Register($dst$$reg);
2837     Register base = as_Register($mem$$base);
2838     int index = $mem$$index;
2839     int scale = $mem$$scale;
2840     int disp = $mem$$disp;
2841     if (index == -1) {
2842        if (disp != 0) {
2843         __ lea(rscratch1, Address(base, disp));
2844         __ ldaxr(dst_reg, rscratch1);
2845       } else {
2846         // TODO
2847         // should we ever get anything other than this case?
2848         __ ldaxr(dst_reg, base);
2849       }
2850     } else {
2851       Register index_reg = as_Register(index);
2852       if (disp == 0) {
2853         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2854         __ ldaxr(dst_reg, rscratch1);
2855       } else {
2856         __ lea(rscratch1, Address(base, disp));
2857         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2858         __ ldaxr(dst_reg, rscratch1);
2859       }
2860     }
2861   %}
2862 
2863   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2864     MacroAssembler _masm(&cbuf);
2865     Register src_reg = as_Register($src$$reg);
2866     Register base = as_Register($mem$$base);
2867     int index = $mem$$index;
2868     int scale = $mem$$scale;
2869     int disp = $mem$$disp;
2870     if (index == -1) {
2871        if (disp != 0) {
2872         __ lea(rscratch2, Address(base, disp));
2873         __ stlxr(rscratch1, src_reg, rscratch2);
2874       } else {
2875         // TODO
2876         // should we ever get anything other than this case?
2877         __ stlxr(rscratch1, src_reg, base);
2878       }
2879     } else {
2880       Register index_reg = as_Register(index);
2881       if (disp == 0) {
2882         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
2883         __ stlxr(rscratch1, src_reg, rscratch2);
2884       } else {
2885         __ lea(rscratch2, Address(base, disp));
2886         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
2887         __ stlxr(rscratch1, src_reg, rscratch2);
2888       }
2889     }
2890     __ cmpw(rscratch1, zr);
2891   %}
2892 
  // --- CompareAndSwap encodings -------------------------------------------
  // All delegate to MacroAssembler::cmpxchg with release semantics but no
  // acquire (see the *_acq variants below).  The memory operand must be a
  // bare base register (guaranteed: index == -1 and disp == 0).

  // 64-bit CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // Halfword CAS: oldval is zero-extended into rscratch2 first so the
  // comparison uses only the low 16 bits.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$base$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // Byte CAS: oldval zero-extended into rscratch2, as above but 8 bits.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$base$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2926 
2927 
2928   // The only difference between aarch64_enc_cmpxchg and
2929   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
2930   // CompareAndSwap sequence to serve as a barrier on acquiring a
2931   // lock.
2932   enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
2933     MacroAssembler _masm(&cbuf);
2934     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
2935     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
2936                Assembler::xword, /*acquire*/ true, /*release*/ true,
2937                /*weak*/ false, noreg);
2938   %}
2939 
2940   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
2941     MacroAssembler _masm(&cbuf);
2942     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
2943     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
2944                Assembler::word, /*acquire*/ true, /*release*/ true,
2945                /*weak*/ false, noreg);
2946   %}
2947 
2948 
  // auxiliary used for CompareAndSwapX to set result register
  // Sets res to 1 if the preceding comparison (set by the CAS sequence)
  // produced EQ, else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
2955 
2956   // prefetch encodings
2957 
  // Prefetch-for-store (prfm PSTL1KEEP) at the operand's address.
  // When both an index and a displacement are present the displacement is
  // folded into rscratch1 with lea first, since the prfm Address form
  // cannot carry all three components.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
2976 
  /// mov encodings
2978 
  // 32-bit immediate move; zero is materialized from the zero register
  // instead of an immediate form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; same zero special case as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3000 
  // Pointer-constant move.  NULL and (address)1 must never reach this
  // encoding (they are matched by aarch64_enc_mov_p0 / _p1 below).  The
  // relocation type selects the materialization:
  //   oop       -> movoop with an immediate oop reloc
  //   metadata  -> mov_metadata
  //   none      -> small constants (below the VM page size) get a plain
  //                mov; larger addresses use an adrp + add pair.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3025 
  // Null pointer constant: move from the zero register.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a marker value by callers of this
  // pattern -- see the immP_1 operand).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Polling-page address: adrp with a poll_type relocation.  The page is
  // page-aligned, so the low-12-bit offset returned by adrp must be zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Card-table byte map base, via the MacroAssembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
3051 
  // Narrow (compressed) oop constant; NULL is matched by _n0 below and
  // must not reach here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow null: move from the zero register.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3083 
3084   // arithmetic encodings
3085 
  // Add/subtract immediate, 32-bit.  $primary distinguishes the two
  // matched patterns (add == 0, subtract == 1); the constant is negated
  // for subtract, and a negative result is flipped back into the
  // opposite instruction so the assembler always sees a non-negative
  // immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Add/subtract immediate, 64-bit; same $primary convention as above.
  // Note the constant is still handled as int32_t -- the immLAddSub
  // operand presumably restricts it to that range (TODO confirm).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3113 
3114   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3115     MacroAssembler _masm(&cbuf);
3116    Register dst_reg = as_Register($dst$$reg);
3117    Register src1_reg = as_Register($src1$$reg);
3118    Register src2_reg = as_Register($src2$$reg);
3119     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3120   %}
3121 
3122   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3123     MacroAssembler _masm(&cbuf);
3124    Register dst_reg = as_Register($dst$$reg);
3125    Register src1_reg = as_Register($src1$$reg);
3126    Register src2_reg = as_Register($src2$$reg);
3127     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3128   %}
3129 
3130   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3131     MacroAssembler _masm(&cbuf);
3132    Register dst_reg = as_Register($dst$$reg);
3133    Register src1_reg = as_Register($src1$$reg);
3134    Register src2_reg = as_Register($src2$$reg);
3135     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3136   %}
3137 
3138   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3139     MacroAssembler _masm(&cbuf);
3140    Register dst_reg = as_Register($dst$$reg);
3141    Register src1_reg = as_Register($src1$$reg);
3142    Register src2_reg = as_Register($src2$$reg);
3143     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3144   %}
3145 
3146   // compare instruction encodings
3147 
  // --- Compare/test encodings ---------------------------------------------
  // All set condition flags only (results are discarded into zr where an
  // explicit destination is required).

  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: a negative
  // value is compared by adding its negation, so the immediate passed to
  // the assembler is always non-negative.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.  The `val != -val`
  // branch filters out Long.MIN_VALUE (the only nonzero value equal to
  // its own negation), which cannot be negated and so is materialized in
  // rscratch1 instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate, via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3229 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-comparison variant; identical emission, the cmpOpU operand
  // carries the unsigned condition code.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3247 
  // Slow-path subtype check via check_klass_subtype_slow_path, setting
  // condition codes.  On the fall-through (subtype) path, the $primary
  // variant additionally zeroes result_reg before the miss label is
  // bound; on miss, control lands at `miss` with result_reg untouched
  // by this encoding.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3265 
  // Static Java call.  When _method is null the target is a runtime
  // wrapper and gets a plain runtime-call trampoline; otherwise a
  // trampoline call with an opt_virtual or static_call relocation is
  // emitted plus the to-interpreter stub.  Either a failed stub emission
  // or a failed trampoline (call == NULL) records a CodeCache-full
  // bailout and returns early.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      // Optimized-virtual vs. plain static call relocation.
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3292 
  // Dynamic (inline-cache) Java call; bails out with a CodeCache-full
  // failure if the call could not be emitted.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3302 
  // Post-call epilogue; the VerifyStackAtCalls check is not implemented
  // on this port (call_Unimplemented aborts).
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3310 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Arbitrary address: call through blrt with the signature info
      // (GP/FP arg counts and return type) from the TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3341 
  // Jump to the rethrow stub (far_jump: may be outside branch range).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target register.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding: the callee expects the
  // return address in r3 (exception oop already in r0).
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3367 
  // Fast-path monitor enter (C2 FastLock).  Attempts, in order:
  // biased locking (when enabled), stack-locking by CAS-ing the box
  // address into the object header, a recursive stack-lock check, and
  // finally a CAS on the inflated monitor's owner field.  On exit the
  // condition flags encode the result: EQ = locked, NE = the caller
  // must take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle an existing (inflated) monitor.
    // We could use AArch64's bit-test-and-branch here, but
    // markOopDesc does not define a bit index, just the bit value,
    // so assert in case the bit position changes.
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare the object markOop with the expected unlocked value and,
    // if equal, exchange the box address into the object markOop.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; execution continues at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the masked difference is zero, the markOop points into our own
    // stack (within a page of sp) and the lock bits are clear, so this
    // is a recursive lock: store 0 as the displaced header in the box.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      __ cmp(rscratch1, disp_hdr);
    } else {
      // LL/SC retry loop.
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3513 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit (C2 FastUnlock).  Handles biased-lock exit
  // (when enabled), recursive stack-unlock, restoring the displaced
  // header into the object by CAS, and releasing an inflated monitor.
  // On exit the condition flags encode the result: EQ = unlocked,
  // NE = the caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a lightweight lock; this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // LL/SC retry loop.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    // Only release (clear the owner field) if EntryList and cxq are both empty.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr);
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3602 
3603 %}
3604 
3605 //----------FRAME--------------------------------------------------------------
3606 // Definition of frame structure and management information.
3607 //
3608 //  S T A C K   L A Y O U T    Allocators stack-slot number
3609 //                             |   (to get allocators register number
3610 //  G  Owned by    |        |  v    add OptoReg::stack0())
3611 //  r   CALLER     |        |
3612 //  o     |        +--------+      pad to even-align allocators stack-slot
3613 //  w     V        |  pad0  |        numbers; owned by CALLER
3614 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3615 //  h     ^        |   in   |  5
3616 //        |        |  args  |  4   Holes in incoming args owned by SELF
3617 //  |     |        |        |  3
3618 //  |     |        +--------+
3619 //  V     |        | old out|      Empty on Intel, window on Sparc
3620 //        |    old |preserve|      Must be even aligned.
3621 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3622 //        |        |   in   |  3   area for Intel ret address
3623 //     Owned by    |preserve|      Empty on Sparc.
3624 //       SELF      +--------+
3625 //        |        |  pad2  |  2   pad to align old SP
3626 //        |        +--------+  1
3627 //        |        | locks  |  0
3628 //        |        +--------+----> OptoReg::stack0(), even aligned
3629 //        |        |  pad1  | 11   pad to align new SP
3630 //        |        +--------+
3631 //        |        |        | 10
3632 //        |        | spills |  9   spills
3633 //        V        |        |  8   (pad0 slot for callee)
3634 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3635 //        ^        |  out   |  7
3636 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3637 //     Owned by    +--------+
3638 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3639 //        |    new |preserve|      Must be even-aligned.
3640 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3641 //        |        |        |
3642 //
3643 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3644 //         known from SELF's arguments and the Java calling convention.
3645 //         Region 6-7 is determined per call site.
3646 // Note 2: If the calling convention leaves holes in the incoming argument
3647 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3649 //         incoming area, as the Java calling convention is completely under
3650 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3652 //         varargs C calling conventions.
3653 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3654 //         even aligned with pad0 as needed.
3655 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3656 //           (the latter is true on Intel but is it false on AArch64?)
3657 //         region 6-11 is even aligned; it may be padded out more so that
3658 //         the region from SP to FP meets the minimum stack alignment.
3659 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3660 //         alignment.  Region 11, pad1, may be dynamically extended so that
3661 //         SP meets the minimum alignment.
3662 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half return register for each ideal register type,
    // indexed by ideal register opcode (Op_Node .. Op_RegL).
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-half return register (OptoReg::Bad where the value fits in
    // a single 32-bit slot), indexed the same way as lo[].
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3766 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute (default operand cost)

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute (default insn cost)
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3784 
3785 //----------OPERANDS-----------------------------------------------------------
3786 // Operand definitions must precede instruction definitions for correct parsing
3787 // in the ADLC because operands constitute user defined types which are used in
3788 // instruction definitions.
3789 
3790 //----------Simple Operands----------------------------------------------------
3791 
// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3846 
// 32 bit integer no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3936 
// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 255 (byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF, word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3996 
// 64 bit mask of the form (2^k - 1) with the top two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of the form (2^k - 1) with the top two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4018 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4072 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte (shift 2) scaled or unscaled access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte (shift 3) scaled or unscaled access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte (shift 4) scaled or unscaled access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte (shift 2) scaled or unscaled access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte (shift 3) scaled or unscaled access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte (shift 4) scaled or unscaled access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4153 
// 32 bit integer encodable as an AArch64 add/sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4175 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset of last_Java_pc within the thread's JavaFrameAnchor
// (frame_anchor_offset + last_Java_pc_offset).

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer encodable as an AArch64 add/sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask (0xFFFFFFFF)
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4262 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate (address of the VM's polling page)
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 above -- verify intended use.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4344 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d (exact bit pattern zero)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate representable in a floating-point immediate
// encoding (per Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f (exact bit pattern zero)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate representable in a floating-point immediate
// encoding (per Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4405 
// Narrow pointer operands
// Narrow Pointer Immediate: any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4436 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
// (excludes SP and the other registers reserved out of no_special_reg32)
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4458 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4470 
// Integer 64 bit Register not Special
// NOTE(review): unlike the sibling operands (iRegINoSp, iRegPNoSp) this
// one declares no op_cost(0), so the ADLC default operand cost applies —
// confirm the omission is intentional before changing it.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
4480 
// Pointer Register Operands
// Pointer Register
// Most general pointer operand; also accepts the pinned/no-special
// pointer operands listed via the extra match clauses.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
// (pointer register excluding SP/FP and other reserved registers)
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4513 
// Register-pinned pointer operands: each one allocates only in the single
// named register class, for rules where the ABI or runtime fixes the
// register an operand must live in.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4597 
// Register-pinned long operands: each allocates only in the single named
// register class.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4641 
// Pointer 64 bit Register FP only
// (the frame pointer register; allocates only in fp_reg)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4652 
// Register-pinned 32-bit integer operands: each allocates only in the
// single named register class.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4698 
// Pointer Register Operands
// Narrow Pointer Register (holds a compressed oop, hence a 32-bit class)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R0
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R2
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R3
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (the previous comment said "Integer 64 bit Register not Special",
// copied from iRegLNoSp; this operand is a 32-bit narrow-oop register)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4758 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4802 
// Double operands pinned to the individual FP/SIMD registers V0..V3,
// for rules that require a specific vector register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4838 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4878 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously said "link_reg", copied from lr_RegP)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link (return address) register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4920 
//----------Memory Operands----------------------------------------------------
// Each memory operand maps an ideal address expression onto the AArch64
// base / index / scale / displacement fields via MEMORY_INTER.
// index(0xffffffff) is the ADLC convention for "no index register".

// [base] — plain register-indirect
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (sign-extended 32-bit index << scale)]; only matches when the
// scaled access is legal for every memory use of this AddP
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (64-bit index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + sign-extended 32-bit index]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + 64-bit index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
4994 
// [base + immediate-offset] forms. The suffixed variants (I4/I8/I16,
// L4/L8/L16) take the correspondingly restricted immediate operand so
// the offset is encodable for that access width.

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5106 
// Narrow-base memory operands: when Universe::narrow_oop_shift() == 0 a
// compressed oop register can serve directly as the address base, so the
// DecodeN costs nothing. These mirror the wide-pointer forms above.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5211 
5212 
5213 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread_reg + fixed pc-slot offset]; base is pinned to the thread
// register and the displacement is the immL_pc_off constant.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5228 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the "RSP" wording in the base(0x1e) comments below reads as
// inherited from the x86 AD file — confirm 0x1e is this file's encoding of
// the AArch64 stack pointer in the register definitions section.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5303 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons

// The hex values are AArch64 condition-code encodings for the signed
// condition mnemonics shown alongside them.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

// Same structure as cmpOp but mapped to the unsigned condition codes
// (lo/hs/ls/hi).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5359 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Only matches Bool nodes testing eq/ne; the full condition table is
// still supplied because COND_INTER requires all entries.
operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Only matches Bool nodes testing lt/ge.
operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Only matches Bool nodes testing eq/ne/lt/ge.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5435 
// Special operand allowing long args to int ops to be truncated for free
// Matches (ConvL2I reg) so 32-bit instructions can consume the low half
// of a long register directly without an explicit truncation.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // Added the trailing ';' for consistency with every other operand's
  // interface clause in this file.
  interface(REG_INTER);
%}
5448 
// Vector memory opclasses: the addressing modes legal for 4-, 8- and
// 16-byte vector loads/stores (plain indirect, base+index, or
// base + size-restricted immediate offset).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5452 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5480 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names onto the generic S0..S5 stages declared
// by the pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5490 
5491 // Integer ALU reg operation
5492 pipeline %{
5493 
5494 attributes %{
5495   // ARM instructions are of fixed length
5496   fixed_size_instructions;        // Fixed size instructions TODO does
5497   max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
5498   // ARM instructions come in 32-bit word units
5499   instruction_unit_size = 4;         // An instruction is 4 bytes long
5500   instruction_fetch_unit_size = 64;  // The processor fetches one line
5501   instruction_fetch_units = 1;       // of 64 bytes
5502 
5503   // List of nop instructions
5504   nops( MachNop );
5505 %}
5506 
5507 // We don't use an actual pipeline model so don't care about resources
5508 // or description. we do use pipeline classes to introduce fixed
5509 // latencies
5510 
5511 //----------RESOURCES----------------------------------------------------------
5512 // Resources are the functional units available to the machine
5513 
5514 resources( INS0, INS1, INS01 = INS0 | INS1,
5515            ALU0, ALU1, ALU = ALU0 | ALU1,
5516            MAC,
5517            DIV,
5518            BRANCH,
5519            LDST,
5520            NEON_FP);
5521 
5522 //----------PIPELINE DESCRIPTION-----------------------------------------------
5523 // Pipeline Description specifies the stages in the machine's pipeline
5524 
5525 // Define the pipeline as a generic 6 stage pipeline
5526 pipe_desc(S0, S1, S2, S3, S4, S5);
5527 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
//
// Scalar floating-point classes (NEON_FP resource). Naming:
//   *_s / *_d suffix = single / double precision variant.
// The common shape: sources read early (S1/S2), result written late (S5),
// instruction may issue in either slot (INS01) unless noted otherwise.

// FP dyadic (two-source) arithmetic, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic (two-source) arithmetic, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> int (general-purpose destination)
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long (general-purpose destination)
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double
// NOTE(review): src is declared iRegIorL2I although this is a long->double
// convert (fp_l2f above uses iRegL) — confirm the operand type is intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision; can only issue in slot 0 (INS0)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; can only issue in slot 0 (INS0)
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags, result at S3
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an immediate, single precision; result at S3
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an immediate, double precision; result at S3
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP load from the constant pool, single precision; result at S4
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP load from the constant pool, double precision; result at S4
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
5733 
// Vector (NEON) pipeline classes. Naming: the 64/128 suffix is the vector
// width (vecD/vecX operands). The 128-bit multiply-like forms reserve INS0
// (issue in slot 0 only) while most 64-bit forms can use either slot (INS01).

// Vector integer multiply, 64-bit
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector integer multiply, 128-bit; slot 0 only
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit.
// dst appears twice on purpose: written at S5 and also read at S1,
// because the destination is the accumulator input.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit; slot 0 only
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op, 64-bit; result at S4
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit; slot 0 only
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit; result at S3
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit; slot 0 only
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit (shift counts come in a vector register)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit; slot 0 only
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (immediate is not a pipeline operand)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit; slot 0 only
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP dyadic op, 64-bit
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit; slot 0 only
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit; slot 0 only (unlike most 64-bit forms)
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit; slot 0 only
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit; slot 0 only
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit; slot 0 only
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate a general-purpose register across 64-bit vector lanes
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general-purpose register across 128-bit vector lanes
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register across 64-bit vector lanes
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register across 128-bit vector lanes
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register across 128-bit vector lanes
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move immediate, 64-bit
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move immediate, 128-bit; slot 0 only
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit
// NOTE(review): NEON_FP is reserved at S3 while dst is written at S5 —
// confirm the resource stage is intended.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6008 
// Vector store, 128-bit.
// Fixed operand type: the source of a 128-bit store is a vecX register;
// the original declared vecD, a copy-paste slip from vstore_reg_mem64 above
// (every other 128-bit class in this section takes vecX operands).
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6017 
//------- Integer ALU operations --------------------------
// Integer classes use the symbolic stage names ISS (issue), EX1/EX2
// (execute) and WR (writeback) rather than S0..S5.

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU resource is reserved at
// EX1 (unlike the other EX2-result classes above) — confirm intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6115 
//------- Compare operation -------------------------------
// Compares write the flags register (cr) instead of a data result.

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6142 
//------- Conditional instructions ------------------------
// Conditional selects read the flags register (cr) at EX1.

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSINC   X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6180 
//------- Multiply pipeline operations --------------------
// Multiplies use the dedicated MAC pipe; results appear at writeback (WR).

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------
// Divides use the dedicated DIV pipe and can only issue in slot 0.

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6259 
//------- Load pipeline operations ------------------------
// Loads occupy the LDST pipe; addresses are consumed at issue (ISS).

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------
// Stores read the address at issue and the data later, at EX2.

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: 'dst' here is the address index register (read at ISS), not a result.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6327 
//------- Branch pipeline operations ----------------------
// (header previously said "Store pipeline operations" — copy-paste slip)

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6356 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
6420 
6421 %}
6422 //----------INSTRUCTIONS-------------------------------------------------------
6423 //
6424 // match      -- States which machine-independent subtree may be replaced
6425 //               by this instruction.
6426 // ins_cost   -- The estimated cost of this instruction is used by instruction
6427 //               selection to identify a minimum cost tree of machine
6428 //               instructions that matches a tree of machine-independent
6429 //               instructions.
6430 // format     -- A string providing the disassembly for this instruction.
6431 //               The value of an instruction's operand may be inserted
6432 //               by referring to it with a '$' prefix.
6433 // opcode     -- Three instruction opcodes may be provided.  These are referred
6434 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6436 //               indicate the type of machine instruction, while secondary
6437 //               and tertiary are often used for prefix options or addressing
6438 //               modes.
6439 // ins_encode -- A list of encode classes with parameters. The encode class
6440 //               name must have been defined in an 'enc_class' specification
6441 //               in the encode section of the architecture description.
6442 
6443 // ============================================================================
6444 // Memory (Load/Store) Instructions
6445 
6446 // Load Instructions
6447 
6448 // Load Byte (8 bit signed)
6449 instruct loadB(iRegINoSp dst, memory mem)
6450 %{
6451   match(Set dst (LoadB mem));
6452   predicate(!needs_acquiring_load(n));
6453 
6454   ins_cost(4 * INSN_COST);
6455   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6456 
6457   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6458 
6459   ins_pipe(iload_reg_mem);
6460 %}
6461 
6462 // Load Byte (8 bit signed) into long
6463 instruct loadB2L(iRegLNoSp dst, memory mem)
6464 %{
6465   match(Set dst (ConvI2L (LoadB mem)));
6466   predicate(!needs_acquiring_load(n->in(1)));
6467 
6468   ins_cost(4 * INSN_COST);
6469   format %{ "ldrsb  $dst, $mem\t# byte" %}
6470 
6471   ins_encode(aarch64_enc_ldrsb(dst, mem));
6472 
6473   ins_pipe(iload_reg_mem);
6474 %}
6475 
6476 // Load Byte (8 bit unsigned)
6477 instruct loadUB(iRegINoSp dst, memory mem)
6478 %{
6479   match(Set dst (LoadUB mem));
6480   predicate(!needs_acquiring_load(n));
6481 
6482   ins_cost(4 * INSN_COST);
6483   format %{ "ldrbw  $dst, $mem\t# byte" %}
6484 
6485   ins_encode(aarch64_enc_ldrb(dst, mem));
6486 
6487   ins_pipe(iload_reg_mem);
6488 %}
6489 
6490 // Load Byte (8 bit unsigned) into long
6491 instruct loadUB2L(iRegLNoSp dst, memory mem)
6492 %{
6493   match(Set dst (ConvI2L (LoadUB mem)));
6494   predicate(!needs_acquiring_load(n->in(1)));
6495 
6496   ins_cost(4 * INSN_COST);
6497   format %{ "ldrb  $dst, $mem\t# byte" %}
6498 
6499   ins_encode(aarch64_enc_ldrb(dst, mem));
6500 
6501   ins_pipe(iload_reg_mem);
6502 %}
6503 
6504 // Load Short (16 bit signed)
6505 instruct loadS(iRegINoSp dst, memory mem)
6506 %{
6507   match(Set dst (LoadS mem));
6508   predicate(!needs_acquiring_load(n));
6509 
6510   ins_cost(4 * INSN_COST);
6511   format %{ "ldrshw  $dst, $mem\t# short" %}
6512 
6513   ins_encode(aarch64_enc_ldrshw(dst, mem));
6514 
6515   ins_pipe(iload_reg_mem);
6516 %}
6517 
6518 // Load Short (16 bit signed) into long
6519 instruct loadS2L(iRegLNoSp dst, memory mem)
6520 %{
6521   match(Set dst (ConvI2L (LoadS mem)));
6522   predicate(!needs_acquiring_load(n->in(1)));
6523 
6524   ins_cost(4 * INSN_COST);
6525   format %{ "ldrsh  $dst, $mem\t# short" %}
6526 
6527   ins_encode(aarch64_enc_ldrsh(dst, mem));
6528 
6529   ins_pipe(iload_reg_mem);
6530 %}
6531 
6532 // Load Char (16 bit unsigned)
6533 instruct loadUS(iRegINoSp dst, memory mem)
6534 %{
6535   match(Set dst (LoadUS mem));
6536   predicate(!needs_acquiring_load(n));
6537 
6538   ins_cost(4 * INSN_COST);
6539   format %{ "ldrh  $dst, $mem\t# short" %}
6540 
6541   ins_encode(aarch64_enc_ldrh(dst, mem));
6542 
6543   ins_pipe(iload_reg_mem);
6544 %}
6545 
6546 // Load Short/Char (16 bit unsigned) into long
6547 instruct loadUS2L(iRegLNoSp dst, memory mem)
6548 %{
6549   match(Set dst (ConvI2L (LoadUS mem)));
6550   predicate(!needs_acquiring_load(n->in(1)));
6551 
6552   ins_cost(4 * INSN_COST);
6553   format %{ "ldrh  $dst, $mem\t# short" %}
6554 
6555   ins_encode(aarch64_enc_ldrh(dst, mem));
6556 
6557   ins_pipe(iload_reg_mem);
6558 %}
6559 
6560 // Load Integer (32 bit signed)
6561 instruct loadI(iRegINoSp dst, memory mem)
6562 %{
6563   match(Set dst (LoadI mem));
6564   predicate(!needs_acquiring_load(n));
6565 
6566   ins_cost(4 * INSN_COST);
6567   format %{ "ldrw  $dst, $mem\t# int" %}
6568 
6569   ins_encode(aarch64_enc_ldrw(dst, mem));
6570 
6571   ins_pipe(iload_reg_mem);
6572 %}
6573 
6574 // Load Integer (32 bit signed) into long
6575 instruct loadI2L(iRegLNoSp dst, memory mem)
6576 %{
6577   match(Set dst (ConvI2L (LoadI mem)));
6578   predicate(!needs_acquiring_load(n->in(1)));
6579 
6580   ins_cost(4 * INSN_COST);
6581   format %{ "ldrsw  $dst, $mem\t# int" %}
6582 
6583   ins_encode(aarch64_enc_ldrsw(dst, mem));
6584 
6585   ins_pipe(iload_reg_mem);
6586 %}
6587 
6588 // Load Integer (32 bit unsigned) into long
6589 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6590 %{
6591   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6592   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6593 
6594   ins_cost(4 * INSN_COST);
6595   format %{ "ldrw  $dst, $mem\t# int" %}
6596 
6597   ins_encode(aarch64_enc_ldrw(dst, mem));
6598 
6599   ins_pipe(iload_reg_mem);
6600 %}
6601 
6602 // Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed disassembly annotation: this is a 64-bit (long) load; the
  // original format string said "# int" (copy-paste from loadI).
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6615 
// Load Range
// Note: no needs_acquiring_load predicate on this rule (array length loads).
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// Note: the FP loads use the generic pipe_class_memory rather than
// iload_reg_mem (which models the integer LDST pipe).
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6712 
6713 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 insns: a general pointer constant may expand to a full
// movz/movk sequence or a relocated address.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
6769 
6770 // Load Pointer Constant One
6771 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: this materializes the pointer constant 1,
  // not NULL (format string was copy-pasted from loadConP0 above).
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6783 
// Load Poll Page Constant
// Materialized with a PC-relative adr rather than a mov sequence.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
6853 
// Load Packed Float Constant
// "Packed" = the value is representable as an FMOV immediate, so it can
// be materialized without a constant-table load.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
6897 
6898 // Load Double Constant
6899 
// General double constants come from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed the disassembly annotation: this loads a double, not a float
  // (the format previously said "float=$con").
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6914 
6915 // Store Instructions
6916 
6917 // Store CMS card-mark Immediate
// Zero store used for GC card marks; the predicate lets us skip the
// StoreStore barrier when the surrounding graph already orders the stores.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  // Costed twice the elided form so the matcher prefers that one
  // whenever its predicate holds.
  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  // Plain (non-releasing) store only; volatile stores match the stlr
  // variants further down.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
6961 
6962 
// Store zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // aarch64_enc_strb0 stores the zero register, so show "zr": the old
  // text "rscractch2" was a typo and did not match the emitted code
  // (compare the storeimmCM0 format above, which already shows zr).
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6975 
6976 // Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short: strh of the zero register, no source operand.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Integer: strw of the zero register.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7030 
7031 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed the disassembly annotation: this is a 64-bit long store
  // (the format previously said "# int").
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7044 
7045 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Store zero Long via the zero register; annotation fixed from the
  // misleading "# int" to "# long".
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7058 
7059 // Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store a zero narrow oop by reusing rheapbase: when both the narrow
// oop and narrow klass bases are NULL, rheapbase holds zero (see the
// format annotation), so it can be stored directly.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7177 
7178 //  ---------------- volatile loads and stores ----------------
7179 
7180 // Load Byte (8 bit signed)
// Volatile loads: these match only via the sync_memory/indirect
// addressing mode and are emitted as load-acquire (ldar*) instructions.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7269 
7270 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // The encoding emits ldarsh (sign-extending load-acquire); the format
  // previously claimed "ldarh", the zero-extending form — fixed so the
  // PrintAssembly output matches the emitted instruction.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7282 
7283 // Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask is absorbed: ldarw already zero-extends.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7308 
7309 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed the disassembly annotation: 64-bit long load, not "# int".
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7321 
7322 // Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Volatile stores: emitted as store-release (stlr*) instructions.

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7413 
7414 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed the disassembly annotation: 64-bit long store, not "# int".
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7426 
7427 // Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7481 
7482 //  ---------------- end of volatile loads and stores ----------------
7483 
7484 // ============================================================================
7485 // BSWAP Instructions
7486 
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Unsigned variant: rev16w alone suffices, result stays zero-extended.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Signed variant: after byte-swapping, sbfmw sign-extends bits 0..15
// into the full int result.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7540 
7541 // ============================================================================
7542 // Zero Count Instructions
7543 
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// No native count-trailing-zeros instruction: bit-reverse (rbit) then
// count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7595 
7596 //---------- Population Count Instructions -------------------------------------
7597 //
7598 
// Population count is done through the vector unit: move the value to a
// SIMD register, cnt per-byte, addv to sum the lanes, move back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (movw zero-extends it in place)
    // without declaring any effect on src — presumably harmless for an
    // int-typed value, but worth confirming for the L2I case.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load straight into the SIMD register, skipping the GPR.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7685 
7686 // ============================================================================
7687 // MemBar Instruction
7688 
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided acquire: when unnecessary_acquire(n) proves the preceding
// volatile load already carries acquire semantics, emit only a comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Lock acquire barriers are always elided: only a block comment is emitted.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided release: the following volatile store already releases.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier: emits a StoreLoad membar (shown as dmb ish in
// the format). Costed very high so it is only selected when unavoidable.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7836 
7837 // ============================================================================
7838 // Cast/Convert Instructions
7839 
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    // The mov is skipped when the allocator already placed src and dst
    // in the same register.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    // Same register-coincidence elision as castX2P above.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7882 
7883 // Convert compressed oop into int for vectors alignment masking
7884 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when narrow oops are unshifted: the compressed bits are
  // then the low 32 bits of the address.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // The encoding emits movw; the format previously said "mov dst, $src"
  // (wrong mnemonic and a missing '$' on dst), which garbled the
  // PrintAssembly output.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7898 
7899 
7900 // Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // General case: src may be NULL, so the encode must preserve NULL.
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // src is statically known non-NULL, so the NULL check is skipped.
  // NOTE(review): cr is declared as an operand but no effect(KILL cr)
  // is given — presumably the not-null encode leaves flags alone; confirm.
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7953 
7954 // n.b. AArch64 implementations of encode_klass_not_null and
7955 // decode_klass_not_null do not modify the flags register so, unlike
7956 // Intel, we don't kill CR as a side effect here
7957 
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The macro assembler has an in-place (single-register) variant;
    // use it when the allocator gave src and dst the same register.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7991 
// CheckCastPP is a compile-time type assertion only; the pointer value
// is unchanged, so no instruction is emitted (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8001 
// CastPP carries type information for the optimizer only; no code is
// generated (size(0), empty encoding).
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8011 
// CastII is likewise a no-op marker node; zero size and zero cost so
// the register allocator and scheduler ignore it entirely.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8022 
8023 // ============================================================================
8024 // Atomic operation instructions
8025 //
8026 // Intel and SPARC both implement Ideal Node LoadPLocked and
8027 // Store{PIL}Conditional instructions using a normal load for the
8028 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8029 //
8030 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8031 // pair to lock object allocations from Eden space when not using
8032 // TLABs.
8033 //
8034 // There does not appear to be a Load{IL}Locked Ideal Node and the
8035 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8036 // and to use StoreIConditional only for 32-bit and StoreLConditional
8037 // only for 64-bit.
8038 //
8039 // We implement LoadPLocked and StorePLocked instructions using,
8040 // respectively the AArch64 hw load-exclusive and store-conditional
8041 // instructions. Whereas we must implement each of
8042 // Store{IL}Conditional using a CAS which employs a pair of
8043 // instructions comprising a load-exclusive followed by a
8044 // store-conditional.
8045 
8046 
8047 // Locked-load (linked load) of the current heap-top
8048 // used when updating the eden heap top
8049 // implemented using ldaxr on AArch64
8050 
instruct loadPLocked(iRegPNoSp dst, iRegP mem)
%{
  match(Set dst (LoadPLocked mem));

  // Acquiring load-exclusive (ldaxr) — pairs with the stlxr emitted by
  // storePConditional when publishing a new heap top.
  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8063 
8064 // Conditional-store of the updated heap-top.
8065 // Used during allocation of the shared heap.
8066 // Sets flag (EQ) on success.
8067 // implemented using stlxr on AArch64.
8068 
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  // Per the format, the encoding compares the stlxr status word against
  // zero so that EQ in the flags indicates a successful store.
  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8088 
8089 
8090 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8091 // when attempting to rebias a lock towards the current thread.  We
8092 // must use the acquire form of cmpxchg in order to guarantee acquire
8093 // semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // Result is delivered via the flags: EQ on successful exchange.
  // The acquire form of the cmpxchg encoding is used (see note above
  // this rule in the original sources: required for lock rebiasing).
  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8109 
8110 // storeIConditional also has acquire semantics, for no better reason
8111 // than matching storeLConditional.  At the time of writing this
8112 // comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // 32-bit variant of storeLConditional; uses the acquiring word
  // cmpxchg encoding and reports success through the EQ flag.
  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8128 
8129 // standard CompareAndSwapX when we are using barriers
8130 // these have higher priority than the rules selected by a predicate
8131 
8132 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8133 // can't match them
8134 
// Strong byte CAS with full barriers; res <- 1 on success, 0 on failure.
// Fix: the format annotation previously said "(int)" although this rule
// matches CompareAndSwapB and uses the byte cmpxchg encoding.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8152 
// Strong short CAS with full barriers; res <- 1 on success, 0 on failure.
// Fix: the format annotation previously said "(int)" although this rule
// matches CompareAndSwapS and uses the halfword cmpxchg encoding.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8170 
// Strong int CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8188 
// Strong long CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8206 
// Strong pointer CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8224 
// Strong narrow-oop CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8242 
8243 // alternative CompareAndSwapX when we are eliding barriers
8244 
// Acquiring variant of compareAndSwapI, selected (predicate) when the
// matcher can elide leading barriers; note lower cost (VOLATILE_REF_COST
// vs 2x) so it is preferred over the barrier form when the predicate holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8263 
// Acquiring variant of compareAndSwapL (see compareAndSwapIAcq).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8282 
// Acquiring variant of compareAndSwapP (see compareAndSwapIAcq).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8301 
// Acquiring variant of compareAndSwapN (see compareAndSwapIAcq).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8320 
8321 
8322 // ---------------------------------------------------------------------
8323 
8324 
8325 // BEGIN This section of the file is automatically generated. Do not edit --------------
8326 
8327 // Sundry CAS operations.  Note that release is always true,
8328 // regardless of the memory ordering of the CAS.  This is because we
8329 // need the volatile case to be sequentially consistent but there is
8330 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8331 // can't check the type of memory ordering here, so we always emit a
8332 // STLXR.
8333 
8334 // This section is generated from aarch64_ad_cas.m4
8335 
8336 
8337 
// Strong byte compare-and-exchange returning the old value.
// Fix: the format annotation previously said "weak", but cmpxchg is
// called with /*weak*/ false — this is a strong CAS.
// (Generated section: mirror this fix in aarch64_ad_cas.m4.)
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // Zero-extend the expected byte so the exclusive-load comparison
    // sees the same bit pattern as memory.
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the fetched byte back to int semantics.
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8354 
// Strong short compare-and-exchange returning the old value.
// Fix: format previously said "weak"; cmpxchg uses /*weak*/ false.
// (Generated section: mirror this fix in aarch64_ad_cas.m4.)
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // Zero-extend the expected halfword for the comparison ...
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // ... and sign-extend the fetched halfword back to int semantics.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8371 
// Strong int compare-and-exchange returning the old value.
// Fix: format previously said "weak"; cmpxchg uses /*weak*/ false.
// (Generated section: mirror this fix in aarch64_ad_cas.m4.)
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8386 
// Strong long compare-and-exchange returning the old value.
// Fix: format previously said "weak"; cmpxchg uses /*weak*/ false.
// (Generated section: mirror this fix in aarch64_ad_cas.m4.)
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8401 
// Strong narrow-oop compare-and-exchange returning the old value.
// Fix: format previously said "weak"; cmpxchg uses /*weak*/ false.
// (Generated section: mirror this fix in aarch64_ad_cas.m4.)
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8416 
// Strong pointer compare-and-exchange returning the old value.
// Fix: format previously said "weak"; cmpxchg uses /*weak*/ false.
// (Generated section: mirror this fix in aarch64_ad_cas.m4.)
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8431 
// Weak byte CAS: may fail spuriously (/*weak*/ true); res <- 1 on
// success, 0 on failure. (Generated: mirror edits in aarch64_ad_cas.m4.)
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    // Zero-extend the expected byte before the comparison.
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8449 
// Weak short CAS: may fail spuriously (/*weak*/ true); res <- 1 on
// success, 0 on failure. (Generated: mirror edits in aarch64_ad_cas.m4.)
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    // Zero-extend the expected halfword before the comparison.
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8467 
// Weak int CAS: may fail spuriously (/*weak*/ true); res <- 1 on
// success, 0 on failure. (Generated: mirror edits in aarch64_ad_cas.m4.)
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8484 
// Weak long CAS: may fail spuriously (/*weak*/ true); res <- 1 on
// success, 0 on failure. (Generated: mirror edits in aarch64_ad_cas.m4.)
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8501 
// Weak narrow-oop CAS: may fail spuriously (/*weak*/ true); res <- 1 on
// success, 0 on failure. (Generated: mirror edits in aarch64_ad_cas.m4.)
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8518 
// Weak pointer CAS: may fail spuriously (/*weak*/ true); res <- 1 on
// success, 0 on failure. (Generated: mirror edits in aarch64_ad_cas.m4.)
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8535 
8536 // END This section of the file is automatically generated. Do not edit --------------
8537 // ---------------------------------------------------------------------
8538 
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    Register prev_reg = $prev$$Register;
    Register newv_reg = $newv$$Register;
    Register addr_reg = as_Register($mem$$base);
    // 32-bit atomic exchange: prev gets the old memory contents.
    __ atomic_xchgw(prev_reg, newv_reg, addr_reg);
  %}
  ins_pipe(pipe_serial);
%}
8547 
// 64-bit atomic exchange: prev <- old *mem, *mem <- newv.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8556 
// Narrow-oop atomic exchange (32-bit word form).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8565 
// Pointer atomic exchange (64-bit form).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8574 
8575 
// Atomic fetch-and-add, long, register increment; newval receives the
// value fetched from memory (GetAndAdd returns the old value).
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8585 
// Cheaper form selected when the fetched value is unused (predicate);
// passes noreg so no result register is consumed.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8596 
// Fetch-and-add, long, immediate increment (immLAddSub-encodable).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8606 
// Immediate-increment variant with the result discarded (see
// get_and_addL_no_res).
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8617 
// Atomic fetch-and-add, int (word form), register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8627 
// Int fetch-and-add with the result discarded (see get_and_addL_no_res).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8638 
// Fetch-and-add, int, immediate increment (immIAddSub-encodable).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8648 
// Immediate int fetch-and-add with the result discarded.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8659 
8660 // Manifest a CmpL result in an integer register.
8661 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    // dst = (src1 != src2) ? 1 : 0, then negated when src1 < src2,
    // yielding the required -1 / 0 / +1 result.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8682 
// Immediate-operand variant of cmpL3_reg_reg.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // A negative immediate cannot be encoded in subs, so add its
    // negation instead.  NOTE(review): -con would overflow for
    // con == INT_MIN; presumably immLAddSub restricts the constant to
    // add/sub-encodable magnitudes, which excludes that case — confirm
    // against the operand definition.
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8707 
8708 // ============================================================================
8709 // Conditional Move Instructions
8710 
8711 // n.b. we have identical rules for both a signed compare op (cmpOp)
8712 // and an unsigned compare op (cmpOpU). it would be nice if we could
8713 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
8719 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8720 
// Conditional move, int, signed compare: csel writes $src2 when the
// condition holds, otherwise $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8736 
// Unsigned-compare flavour of cmovI_reg_reg (see the note above on why
// cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8752 
8753 // special cases where one arg is zero
8754 
8755 // n.b. this is selected in preference to the rule above because it
8756 // avoids loading constant 0 into a source register
8757 
8758 // TODO
8759 // we ought only to be able to cull one of these variants as the ideal
8760 // transforms ought always to order the zero consistently (to left/right?)
8761 
// Zero on the left: select $src when the condition holds, zr otherwise,
// avoiding materializing the constant 0 in a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8777 
// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8793 
// Zero on the right: select zr when the condition holds, $src otherwise.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8809 
// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8825 
8826 // special case for creating a boolean 0 or 1
8827 
8828 // n.b. this is selected in preference to the rule above because it
8829 // avoids loading constants 0 and 1 into a source register
8830 
// Boolean materialization: csincw zr, zr gives 0 when the condition
// holds and 0+1 otherwise, i.e. dst = cond ? 0 : 1, matching
// (CMoveI cond (Binary one zero)) without loading either constant.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8849 
// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8868 
// Conditional move of two long registers, signed compare:
// dst = $cmp ? $src2 : $src1 (csel picks its first source on true).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As above but for an unsigned comparison.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = $cmp ? 0 : $src (long, signed compare); zero comes from zr.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? 0 : $src (long, unsigned compare).
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : 0 (long, signed compare) - zero is the fall-through.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : 0 (long, unsigned compare).
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8966 
// Conditional move of two pointer registers, signed compare:
// dst = $cmp ? $src2 : $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As above but for an unsigned comparison.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = $cmp ? null : $src (ptr, signed compare); null comes from zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? null : $src (ptr, unsigned compare).
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : null (ptr, signed compare).
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : null (ptr, unsigned compare).
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9064 
// Conditional move of two compressed-pointer (narrow oop) registers,
// signed compare: dst = $cmp ? $src2 : $src1 (32-bit cselw).
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9080 
// Conditional move of two compressed-pointer (narrow oop) registers,
// unsigned compare: dst = $cmp ? $src2 : $src1 (32-bit cselw).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // This is the unsigned variant (cmpOpU/rFlagsRegU); the trace text now
  // says "unsigned" to match cmovUI/cmovUL/cmovUP.
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9096 
// special cases where one arg is zero

// dst = $cmp ? narrow-null : $src (signed compare); null comes from zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? narrow-null : $src (unsigned compare).
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : narrow-null (signed compare).
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : narrow-null (unsigned compare).
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9162 
// Conditional move of two float registers, signed compare.
// Note the encoder passes $src2 before $src1: fcsels selects its first
// source when the condition holds, giving dst = $cmp ? $src2 : $src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// As above but for an unsigned comparison.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9198 
// Conditional move of two double registers, signed compare:
// dst = $cmp ? $src2 : $src1 (fcseld selects its first source on true).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Trace text corrected to say "double" - this is the CMoveD/fcseld rule,
  // not the float one.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9216 
// Conditional move of two double registers, unsigned compare:
// dst = $cmp ? $src2 : $src1 (fcseld selects its first source on true).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Trace text corrected to say "double" - this is the CMoveD/fcseld rule,
  // not the float one.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9234 
9235 // ============================================================================
9236 // Arithmetic Instructions
9237 //
9238 
9239 // Integer Addition
9240 
9241 // TODO
9242 // these currently employ operations which do not set CR and hence are
9243 // not flagged as killing CR but we would like to isolate the cases
9244 // where we want to set flags from those where we don't. need to work
9245 // out how to do that.
9246 
9247 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9248   match(Set dst (AddI src1 src2));
9249 
9250   ins_cost(INSN_COST);
9251   format %{ "addw  $dst, $src1, $src2" %}
9252 
9253   ins_encode %{
9254     __ addw(as_Register($dst$$reg),
9255             as_Register($src1$$reg),
9256             as_Register($src2$$reg));
9257   %}
9258 
9259   ins_pipe(ialu_reg_reg);
9260 %}
9261 
9262 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
9263   match(Set dst (AddI src1 src2));
9264 
9265   ins_cost(INSN_COST);
9266   format %{ "addw $dst, $src1, $src2" %}
9267 
9268   // use opcode to indicate that this is an add not a sub
9269   opcode(0x0);
9270 
9271   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9272 
9273   ins_pipe(ialu_reg_imm);
9274 %}
9275 
9276 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
9277   match(Set dst (AddI (ConvL2I src1) src2));
9278 
9279   ins_cost(INSN_COST);
9280   format %{ "addw $dst, $src1, $src2" %}
9281 
9282   // use opcode to indicate that this is an add not a sub
9283   opcode(0x0);
9284 
9285   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9286 
9287   ins_pipe(ialu_reg_imm);
9288 %}
9289 
9290 // Pointer Addition
9291 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
9292   match(Set dst (AddP src1 src2));
9293 
9294   ins_cost(INSN_COST);
9295   format %{ "add $dst, $src1, $src2\t# ptr" %}
9296 
9297   ins_encode %{
9298     __ add(as_Register($dst$$reg),
9299            as_Register($src1$$reg),
9300            as_Register($src2$$reg));
9301   %}
9302 
9303   ins_pipe(ialu_reg_reg);
9304 %}
9305 
9306 instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
9307   match(Set dst (AddP src1 (ConvI2L src2)));
9308 
9309   ins_cost(1.9 * INSN_COST);
9310   format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
9311 
9312   ins_encode %{
9313     __ add(as_Register($dst$$reg),
9314            as_Register($src1$$reg),
9315            as_Register($src2$$reg), ext::sxtw);
9316   %}
9317 
9318   ins_pipe(ialu_reg_reg);
9319 %}
9320 
9321 instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
9322   match(Set dst (AddP src1 (LShiftL src2 scale)));
9323 
9324   ins_cost(1.9 * INSN_COST);
9325   format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
9326 
9327   ins_encode %{
9328     __ lea(as_Register($dst$$reg),
9329            Address(as_Register($src1$$reg), as_Register($src2$$reg),
9330                    Address::lsl($scale$$constant)));
9331   %}
9332 
9333   ins_pipe(ialu_reg_reg_shift);
9334 %}
9335 
9336 instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
9337   match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
9338 
9339   ins_cost(1.9 * INSN_COST);
9340   format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
9341 
9342   ins_encode %{
9343     __ lea(as_Register($dst$$reg),
9344            Address(as_Register($src1$$reg), as_Register($src2$$reg),
9345                    Address::sxtw($scale$$constant)));
9346   %}
9347 
9348   ins_pipe(ialu_reg_reg_shift);
9349 %}
9350 
// Left shift of a sign-extended int, implemented as a single sbfiz
// (signed bitfield insert in zero) instead of sxtw + lsl.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // Width is capped at 32 - presumably because only the low 32 bits of
    // the int source are significant after sign extension (NOTE(review):
    // confirm against the sbfiz field-width encoding rules).
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9365 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
// 64-bit register-register add: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9414 
// Integer Subtraction
// 32-bit register-register subtract: dst = src1 - src2.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit register-immediate subtract; shares the add/sub immediate
// encoder, selected via opcode.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit register-register subtract: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9462 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed missing space after the mnemonic ("sub$dst" -> "sub $dst"),
  // matching the sibling addL_reg_imm format.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9477 
// Integer Negation (special case for sub)

// dst = 0 - src, matched from (SubI zero src).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// dst = 0 - src, matched from (SubL zero src).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer Multiply

// 32-bit multiply: dst = src1 * src2.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening 32x32->64 signed multiply, folding both ConvI2L nodes into a
// single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply: dst = src1 * src2 (low 64 bits).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9558 
// High 64 bits of a signed 64x64 multiply (MulHiL), via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Removed the stray trailing comma before the tab in the trace text.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9574 
// Combined Integer Multiply & Add/Sub

// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Trace text corrected to the 32-bit mnemonic actually emitted (maddw).
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9592 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Trace text corrected to the 32-bit mnemonic actually emitted (msubw).
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9608 
// Combined Integer Multiply & Neg

// Fused 32-bit multiply-negate: dst = -(src1 * src2); matches the
// negation on either operand.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Trace text corrected to the 32-bit mnemonic actually emitted (mnegw).
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9626 
// Combined Long Multiply & Add/Sub

// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// Fused 64-bit multiply-negate: dst = -(src1 * src2); matches the
// negation on either operand.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Integer Divide

// 32-bit signed divide via the shared divw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 extracts the sign bit; a single logical shift right
// by 31 produces the same result.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + ((src >> 31) >>> 31): the round-toward-zero adjustment used
// before dividing by a power of two, folded into one addw with LSR.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

// 64-bit signed divide via the shared div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit analogue of signExtract: (x >> 63) >>> 63 == x >>> 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
9736 
// 64-bit analogue of div2Round: src + ((src >> 63) >>> 63), the
// round-toward-zero adjustment folded into one add with LSR.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Trace text now shows the LSR-shifted operand actually emitted,
  // matching the int variant div2Round.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
9750 
// Integer Remainder

// 32-bit signed remainder: sdivw then msubw, via the shared modw encoder.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Trace text fixed: removed the unbalanced "(" after msubw.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9763 
// Long Remainder

// 64-bit signed remainder: sdiv then msub, via the shared mod encoder.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Trace text fixed: removed the unbalanced "(" after msub and added the
  // missing tab after the first line, matching modI.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9776 
// Integer Shifts

// Shift Left Register
// 32-bit variable left shift; lslvw uses only the low 5 bits of src2,
// matching Java's shift-count masking.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// The constant is masked to 0..31 explicitly, as Java semantics require.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 32-bit variable unsigned right shift.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Constant shift count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// 32-bit variable signed right shift.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Constant shift count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9874 
9875 // Combined Int Mask and Right Shift (using UBFM)
9876 // TODO
9877 
9878 // Long Shifts
9879 
9880 // Shift Left Register
9881 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
9882   match(Set dst (LShiftL src1 src2));
9883 
9884   ins_cost(INSN_COST * 2);
9885   format %{ "lslv  $dst, $src1, $src2" %}
9886 
9887   ins_encode %{
9888     __ lslv(as_Register($dst$$reg),
9889             as_Register($src1$$reg),
9890             as_Register($src2$$reg));
9891   %}
9892 
9893   ins_pipe(ialu_reg_reg_vshift);
9894 %}
9895 
// Shift Left Immediate
// Matches long dst = src1 << constant. The constant is masked with 0x3f at
// assembly time to mirror Java's long-shift semantics and keep the encoded
// immediate in the valid 0..63 range.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // Continuation arguments aligned under the open paren, consistent
    // with the sibling lsr/asr immediate-shift instructs.
    __ lsl(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9911 
// Shift Right Logical Register
// Matches long dst = src1 >>> src2 with a variable (register) shift amount.
// Emits LSRV; per the AArch64 ISA the shift count is taken from the low
// 6 bits of $src2 for a 64-bit operation, matching Java's long-shift
// masking semantics.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9927 
// Shift Right Logical Immediate
// Matches long dst = src1 >>> constant. The constant is masked with 0x3f at
// assembly time to mirror Java's long-shift semantics and keep the encoded
// immediate in the valid 0..63 range.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9943 
// A special-case pattern for card table stores.
// Matches (ptr-as-long) >>> constant: CastP2X reinterprets the pointer's
// raw bits as a long, so the pointer register can be shifted directly
// without a separate conversion instruction (the card-index computation).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9959 
// Shift Right Arithmetic Register
// Matches long dst = src1 >> src2 with a variable (register) shift amount.
// Emits ASRV; per the AArch64 ISA the shift count is taken from the low
// 6 bits of $src2 for a 64-bit operation, matching Java's long-shift
// masking semantics.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9975 
// Shift Right Arithmetic Immediate
// Matches long dst = src1 >> constant. The constant is masked with 0x3f at
// assembly time to mirror Java's long-shift semantics and keep the encoded
// immediate in the valid 0..63 range.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9991 
9992 // BEGIN This section of the file is automatically generated. Do not edit --------------
9993 
9994 instruct regL_not_reg(iRegLNoSp dst,
9995                          iRegL src1, immL_M1 m1,
9996                          rFlagsReg cr) %{
9997   match(Set dst (XorL src1 m1));
9998   ins_cost(INSN_COST);
9999   format %{ "eon  $dst, $src1, zr" %}
10000 
10001   ins_encode %{
10002     __ eon(as_Register($dst$$reg),
10003               as_Register($src1$$reg),
10004               zr,
10005               Assembler::LSL, 0);
10006   %}
10007 
10008   ins_pipe(ialu_reg);
10009 %}
10010 instruct regI_not_reg(iRegINoSp dst,
10011                          iRegIorL2I src1, immI_M1 m1,
10012                          rFlagsReg cr) %{
10013   match(Set dst (XorI src1 m1));
10014   ins_cost(INSN_COST);
10015   format %{ "eonw  $dst, $src1, zr" %}
10016 
10017   ins_encode %{
10018     __ eonw(as_Register($dst$$reg),
10019               as_Register($src1$$reg),
10020               zr,
10021               Assembler::LSL, 0);
10022   %}
10023 
10024   ins_pipe(ialu_reg);
10025 %}
10026 
10027 instruct AndI_reg_not_reg(iRegINoSp dst,
10028                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10029                          rFlagsReg cr) %{
10030   match(Set dst (AndI src1 (XorI src2 m1)));
10031   ins_cost(INSN_COST);
10032   format %{ "bicw  $dst, $src1, $src2" %}
10033 
10034   ins_encode %{
10035     __ bicw(as_Register($dst$$reg),
10036               as_Register($src1$$reg),
10037               as_Register($src2$$reg),
10038               Assembler::LSL, 0);
10039   %}
10040 
10041   ins_pipe(ialu_reg_reg);
10042 %}
10043 
10044 instruct AndL_reg_not_reg(iRegLNoSp dst,
10045                          iRegL src1, iRegL src2, immL_M1 m1,
10046                          rFlagsReg cr) %{
10047   match(Set dst (AndL src1 (XorL src2 m1)));
10048   ins_cost(INSN_COST);
10049   format %{ "bic  $dst, $src1, $src2" %}
10050 
10051   ins_encode %{
10052     __ bic(as_Register($dst$$reg),
10053               as_Register($src1$$reg),
10054               as_Register($src2$$reg),
10055               Assembler::LSL, 0);
10056   %}
10057 
10058   ins_pipe(ialu_reg_reg);
10059 %}
10060 
10061 instruct OrI_reg_not_reg(iRegINoSp dst,
10062                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10063                          rFlagsReg cr) %{
10064   match(Set dst (OrI src1 (XorI src2 m1)));
10065   ins_cost(INSN_COST);
10066   format %{ "ornw  $dst, $src1, $src2" %}
10067 
10068   ins_encode %{
10069     __ ornw(as_Register($dst$$reg),
10070               as_Register($src1$$reg),
10071               as_Register($src2$$reg),
10072               Assembler::LSL, 0);
10073   %}
10074 
10075   ins_pipe(ialu_reg_reg);
10076 %}
10077 
10078 instruct OrL_reg_not_reg(iRegLNoSp dst,
10079                          iRegL src1, iRegL src2, immL_M1 m1,
10080                          rFlagsReg cr) %{
10081   match(Set dst (OrL src1 (XorL src2 m1)));
10082   ins_cost(INSN_COST);
10083   format %{ "orn  $dst, $src1, $src2" %}
10084 
10085   ins_encode %{
10086     __ orn(as_Register($dst$$reg),
10087               as_Register($src1$$reg),
10088               as_Register($src2$$reg),
10089               Assembler::LSL, 0);
10090   %}
10091 
10092   ins_pipe(ialu_reg_reg);
10093 %}
10094 
10095 instruct XorI_reg_not_reg(iRegINoSp dst,
10096                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10097                          rFlagsReg cr) %{
10098   match(Set dst (XorI m1 (XorI src2 src1)));
10099   ins_cost(INSN_COST);
10100   format %{ "eonw  $dst, $src1, $src2" %}
10101 
10102   ins_encode %{
10103     __ eonw(as_Register($dst$$reg),
10104               as_Register($src1$$reg),
10105               as_Register($src2$$reg),
10106               Assembler::LSL, 0);
10107   %}
10108 
10109   ins_pipe(ialu_reg_reg);
10110 %}
10111 
10112 instruct XorL_reg_not_reg(iRegLNoSp dst,
10113                          iRegL src1, iRegL src2, immL_M1 m1,
10114                          rFlagsReg cr) %{
10115   match(Set dst (XorL m1 (XorL src2 src1)));
10116   ins_cost(INSN_COST);
10117   format %{ "eon  $dst, $src1, $src2" %}
10118 
10119   ins_encode %{
10120     __ eon(as_Register($dst$$reg),
10121               as_Register($src1$$reg),
10122               as_Register($src2$$reg),
10123               Assembler::LSL, 0);
10124   %}
10125 
10126   ins_pipe(ialu_reg_reg);
10127 %}
10128 
10129 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10130                          iRegIorL2I src1, iRegIorL2I src2,
10131                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10132   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10133   ins_cost(1.9 * INSN_COST);
10134   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10135 
10136   ins_encode %{
10137     __ bicw(as_Register($dst$$reg),
10138               as_Register($src1$$reg),
10139               as_Register($src2$$reg),
10140               Assembler::LSR,
10141               $src3$$constant & 0x1f);
10142   %}
10143 
10144   ins_pipe(ialu_reg_reg_shift);
10145 %}
10146 
10147 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10148                          iRegL src1, iRegL src2,
10149                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10150   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10151   ins_cost(1.9 * INSN_COST);
10152   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10153 
10154   ins_encode %{
10155     __ bic(as_Register($dst$$reg),
10156               as_Register($src1$$reg),
10157               as_Register($src2$$reg),
10158               Assembler::LSR,
10159               $src3$$constant & 0x3f);
10160   %}
10161 
10162   ins_pipe(ialu_reg_reg_shift);
10163 %}
10164 
10165 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10166                          iRegIorL2I src1, iRegIorL2I src2,
10167                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10168   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10169   ins_cost(1.9 * INSN_COST);
10170   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10171 
10172   ins_encode %{
10173     __ bicw(as_Register($dst$$reg),
10174               as_Register($src1$$reg),
10175               as_Register($src2$$reg),
10176               Assembler::ASR,
10177               $src3$$constant & 0x1f);
10178   %}
10179 
10180   ins_pipe(ialu_reg_reg_shift);
10181 %}
10182 
10183 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10184                          iRegL src1, iRegL src2,
10185                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10186   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10187   ins_cost(1.9 * INSN_COST);
10188   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10189 
10190   ins_encode %{
10191     __ bic(as_Register($dst$$reg),
10192               as_Register($src1$$reg),
10193               as_Register($src2$$reg),
10194               Assembler::ASR,
10195               $src3$$constant & 0x3f);
10196   %}
10197 
10198   ins_pipe(ialu_reg_reg_shift);
10199 %}
10200 
10201 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10202                          iRegIorL2I src1, iRegIorL2I src2,
10203                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10204   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10205   ins_cost(1.9 * INSN_COST);
10206   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10207 
10208   ins_encode %{
10209     __ bicw(as_Register($dst$$reg),
10210               as_Register($src1$$reg),
10211               as_Register($src2$$reg),
10212               Assembler::LSL,
10213               $src3$$constant & 0x1f);
10214   %}
10215 
10216   ins_pipe(ialu_reg_reg_shift);
10217 %}
10218 
10219 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10220                          iRegL src1, iRegL src2,
10221                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10222   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10223   ins_cost(1.9 * INSN_COST);
10224   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10225 
10226   ins_encode %{
10227     __ bic(as_Register($dst$$reg),
10228               as_Register($src1$$reg),
10229               as_Register($src2$$reg),
10230               Assembler::LSL,
10231               $src3$$constant & 0x3f);
10232   %}
10233 
10234   ins_pipe(ialu_reg_reg_shift);
10235 %}
10236 
10237 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10238                          iRegIorL2I src1, iRegIorL2I src2,
10239                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10240   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10241   ins_cost(1.9 * INSN_COST);
10242   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10243 
10244   ins_encode %{
10245     __ eonw(as_Register($dst$$reg),
10246               as_Register($src1$$reg),
10247               as_Register($src2$$reg),
10248               Assembler::LSR,
10249               $src3$$constant & 0x1f);
10250   %}
10251 
10252   ins_pipe(ialu_reg_reg_shift);
10253 %}
10254 
10255 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10256                          iRegL src1, iRegL src2,
10257                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10258   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10259   ins_cost(1.9 * INSN_COST);
10260   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10261 
10262   ins_encode %{
10263     __ eon(as_Register($dst$$reg),
10264               as_Register($src1$$reg),
10265               as_Register($src2$$reg),
10266               Assembler::LSR,
10267               $src3$$constant & 0x3f);
10268   %}
10269 
10270   ins_pipe(ialu_reg_reg_shift);
10271 %}
10272 
10273 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
10274                          iRegIorL2I src1, iRegIorL2I src2,
10275                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10276   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
10277   ins_cost(1.9 * INSN_COST);
10278   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
10279 
10280   ins_encode %{
10281     __ eonw(as_Register($dst$$reg),
10282               as_Register($src1$$reg),
10283               as_Register($src2$$reg),
10284               Assembler::ASR,
10285               $src3$$constant & 0x1f);
10286   %}
10287 
10288   ins_pipe(ialu_reg_reg_shift);
10289 %}
10290 
10291 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
10292                          iRegL src1, iRegL src2,
10293                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10294   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
10295   ins_cost(1.9 * INSN_COST);
10296   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
10297 
10298   ins_encode %{
10299     __ eon(as_Register($dst$$reg),
10300               as_Register($src1$$reg),
10301               as_Register($src2$$reg),
10302               Assembler::ASR,
10303               $src3$$constant & 0x3f);
10304   %}
10305 
10306   ins_pipe(ialu_reg_reg_shift);
10307 %}
10308 
10309 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
10310                          iRegIorL2I src1, iRegIorL2I src2,
10311                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10312   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
10313   ins_cost(1.9 * INSN_COST);
10314   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
10315 
10316   ins_encode %{
10317     __ eonw(as_Register($dst$$reg),
10318               as_Register($src1$$reg),
10319               as_Register($src2$$reg),
10320               Assembler::LSL,
10321               $src3$$constant & 0x1f);
10322   %}
10323 
10324   ins_pipe(ialu_reg_reg_shift);
10325 %}
10326 
10327 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
10328                          iRegL src1, iRegL src2,
10329                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10330   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
10331   ins_cost(1.9 * INSN_COST);
10332   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
10333 
10334   ins_encode %{
10335     __ eon(as_Register($dst$$reg),
10336               as_Register($src1$$reg),
10337               as_Register($src2$$reg),
10338               Assembler::LSL,
10339               $src3$$constant & 0x3f);
10340   %}
10341 
10342   ins_pipe(ialu_reg_reg_shift);
10343 %}
10344 
10345 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
10346                          iRegIorL2I src1, iRegIorL2I src2,
10347                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10348   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
10349   ins_cost(1.9 * INSN_COST);
10350   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
10351 
10352   ins_encode %{
10353     __ ornw(as_Register($dst$$reg),
10354               as_Register($src1$$reg),
10355               as_Register($src2$$reg),
10356               Assembler::LSR,
10357               $src3$$constant & 0x1f);
10358   %}
10359 
10360   ins_pipe(ialu_reg_reg_shift);
10361 %}
10362 
10363 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
10364                          iRegL src1, iRegL src2,
10365                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10366   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
10367   ins_cost(1.9 * INSN_COST);
10368   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
10369 
10370   ins_encode %{
10371     __ orn(as_Register($dst$$reg),
10372               as_Register($src1$$reg),
10373               as_Register($src2$$reg),
10374               Assembler::LSR,
10375               $src3$$constant & 0x3f);
10376   %}
10377 
10378   ins_pipe(ialu_reg_reg_shift);
10379 %}
10380 
10381 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10382                          iRegIorL2I src1, iRegIorL2I src2,
10383                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10384   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10385   ins_cost(1.9 * INSN_COST);
10386   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10387 
10388   ins_encode %{
10389     __ ornw(as_Register($dst$$reg),
10390               as_Register($src1$$reg),
10391               as_Register($src2$$reg),
10392               Assembler::ASR,
10393               $src3$$constant & 0x1f);
10394   %}
10395 
10396   ins_pipe(ialu_reg_reg_shift);
10397 %}
10398 
10399 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10400                          iRegL src1, iRegL src2,
10401                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10402   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10403   ins_cost(1.9 * INSN_COST);
10404   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10405 
10406   ins_encode %{
10407     __ orn(as_Register($dst$$reg),
10408               as_Register($src1$$reg),
10409               as_Register($src2$$reg),
10410               Assembler::ASR,
10411               $src3$$constant & 0x3f);
10412   %}
10413 
10414   ins_pipe(ialu_reg_reg_shift);
10415 %}
10416 
10417 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10418                          iRegIorL2I src1, iRegIorL2I src2,
10419                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10420   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10421   ins_cost(1.9 * INSN_COST);
10422   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10423 
10424   ins_encode %{
10425     __ ornw(as_Register($dst$$reg),
10426               as_Register($src1$$reg),
10427               as_Register($src2$$reg),
10428               Assembler::LSL,
10429               $src3$$constant & 0x1f);
10430   %}
10431 
10432   ins_pipe(ialu_reg_reg_shift);
10433 %}
10434 
10435 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10436                          iRegL src1, iRegL src2,
10437                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10438   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10439   ins_cost(1.9 * INSN_COST);
10440   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10441 
10442   ins_encode %{
10443     __ orn(as_Register($dst$$reg),
10444               as_Register($src1$$reg),
10445               as_Register($src2$$reg),
10446               Assembler::LSL,
10447               $src3$$constant & 0x3f);
10448   %}
10449 
10450   ins_pipe(ialu_reg_reg_shift);
10451 %}
10452 
10453 instruct AndI_reg_URShift_reg(iRegINoSp dst,
10454                          iRegIorL2I src1, iRegIorL2I src2,
10455                          immI src3, rFlagsReg cr) %{
10456   match(Set dst (AndI src1 (URShiftI src2 src3)));
10457 
10458   ins_cost(1.9 * INSN_COST);
10459   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
10460 
10461   ins_encode %{
10462     __ andw(as_Register($dst$$reg),
10463               as_Register($src1$$reg),
10464               as_Register($src2$$reg),
10465               Assembler::LSR,
10466               $src3$$constant & 0x1f);
10467   %}
10468 
10469   ins_pipe(ialu_reg_reg_shift);
10470 %}
10471 
10472 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
10473                          iRegL src1, iRegL src2,
10474                          immI src3, rFlagsReg cr) %{
10475   match(Set dst (AndL src1 (URShiftL src2 src3)));
10476 
10477   ins_cost(1.9 * INSN_COST);
10478   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
10479 
10480   ins_encode %{
10481     __ andr(as_Register($dst$$reg),
10482               as_Register($src1$$reg),
10483               as_Register($src2$$reg),
10484               Assembler::LSR,
10485               $src3$$constant & 0x3f);
10486   %}
10487 
10488   ins_pipe(ialu_reg_reg_shift);
10489 %}
10490 
10491 instruct AndI_reg_RShift_reg(iRegINoSp dst,
10492                          iRegIorL2I src1, iRegIorL2I src2,
10493                          immI src3, rFlagsReg cr) %{
10494   match(Set dst (AndI src1 (RShiftI src2 src3)));
10495 
10496   ins_cost(1.9 * INSN_COST);
10497   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
10498 
10499   ins_encode %{
10500     __ andw(as_Register($dst$$reg),
10501               as_Register($src1$$reg),
10502               as_Register($src2$$reg),
10503               Assembler::ASR,
10504               $src3$$constant & 0x1f);
10505   %}
10506 
10507   ins_pipe(ialu_reg_reg_shift);
10508 %}
10509 
10510 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
10511                          iRegL src1, iRegL src2,
10512                          immI src3, rFlagsReg cr) %{
10513   match(Set dst (AndL src1 (RShiftL src2 src3)));
10514 
10515   ins_cost(1.9 * INSN_COST);
10516   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
10517 
10518   ins_encode %{
10519     __ andr(as_Register($dst$$reg),
10520               as_Register($src1$$reg),
10521               as_Register($src2$$reg),
10522               Assembler::ASR,
10523               $src3$$constant & 0x3f);
10524   %}
10525 
10526   ins_pipe(ialu_reg_reg_shift);
10527 %}
10528 
10529 instruct AndI_reg_LShift_reg(iRegINoSp dst,
10530                          iRegIorL2I src1, iRegIorL2I src2,
10531                          immI src3, rFlagsReg cr) %{
10532   match(Set dst (AndI src1 (LShiftI src2 src3)));
10533 
10534   ins_cost(1.9 * INSN_COST);
10535   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
10536 
10537   ins_encode %{
10538     __ andw(as_Register($dst$$reg),
10539               as_Register($src1$$reg),
10540               as_Register($src2$$reg),
10541               Assembler::LSL,
10542               $src3$$constant & 0x1f);
10543   %}
10544 
10545   ins_pipe(ialu_reg_reg_shift);
10546 %}
10547 
10548 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
10549                          iRegL src1, iRegL src2,
10550                          immI src3, rFlagsReg cr) %{
10551   match(Set dst (AndL src1 (LShiftL src2 src3)));
10552 
10553   ins_cost(1.9 * INSN_COST);
10554   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
10555 
10556   ins_encode %{
10557     __ andr(as_Register($dst$$reg),
10558               as_Register($src1$$reg),
10559               as_Register($src2$$reg),
10560               Assembler::LSL,
10561               $src3$$constant & 0x3f);
10562   %}
10563 
10564   ins_pipe(ialu_reg_reg_shift);
10565 %}
10566 
10567 instruct XorI_reg_URShift_reg(iRegINoSp dst,
10568                          iRegIorL2I src1, iRegIorL2I src2,
10569                          immI src3, rFlagsReg cr) %{
10570   match(Set dst (XorI src1 (URShiftI src2 src3)));
10571 
10572   ins_cost(1.9 * INSN_COST);
10573   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
10574 
10575   ins_encode %{
10576     __ eorw(as_Register($dst$$reg),
10577               as_Register($src1$$reg),
10578               as_Register($src2$$reg),
10579               Assembler::LSR,
10580               $src3$$constant & 0x1f);
10581   %}
10582 
10583   ins_pipe(ialu_reg_reg_shift);
10584 %}
10585 
10586 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
10587                          iRegL src1, iRegL src2,
10588                          immI src3, rFlagsReg cr) %{
10589   match(Set dst (XorL src1 (URShiftL src2 src3)));
10590 
10591   ins_cost(1.9 * INSN_COST);
10592   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
10593 
10594   ins_encode %{
10595     __ eor(as_Register($dst$$reg),
10596               as_Register($src1$$reg),
10597               as_Register($src2$$reg),
10598               Assembler::LSR,
10599               $src3$$constant & 0x3f);
10600   %}
10601 
10602   ins_pipe(ialu_reg_reg_shift);
10603 %}
10604 
10605 instruct XorI_reg_RShift_reg(iRegINoSp dst,
10606                          iRegIorL2I src1, iRegIorL2I src2,
10607                          immI src3, rFlagsReg cr) %{
10608   match(Set dst (XorI src1 (RShiftI src2 src3)));
10609 
10610   ins_cost(1.9 * INSN_COST);
10611   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
10612 
10613   ins_encode %{
10614     __ eorw(as_Register($dst$$reg),
10615               as_Register($src1$$reg),
10616               as_Register($src2$$reg),
10617               Assembler::ASR,
10618               $src3$$constant & 0x1f);
10619   %}
10620 
10621   ins_pipe(ialu_reg_reg_shift);
10622 %}
10623 
10624 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
10625                          iRegL src1, iRegL src2,
10626                          immI src3, rFlagsReg cr) %{
10627   match(Set dst (XorL src1 (RShiftL src2 src3)));
10628 
10629   ins_cost(1.9 * INSN_COST);
10630   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
10631 
10632   ins_encode %{
10633     __ eor(as_Register($dst$$reg),
10634               as_Register($src1$$reg),
10635               as_Register($src2$$reg),
10636               Assembler::ASR,
10637               $src3$$constant & 0x3f);
10638   %}
10639 
10640   ins_pipe(ialu_reg_reg_shift);
10641 %}
10642 
10643 instruct XorI_reg_LShift_reg(iRegINoSp dst,
10644                          iRegIorL2I src1, iRegIorL2I src2,
10645                          immI src3, rFlagsReg cr) %{
10646   match(Set dst (XorI src1 (LShiftI src2 src3)));
10647 
10648   ins_cost(1.9 * INSN_COST);
10649   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
10650 
10651   ins_encode %{
10652     __ eorw(as_Register($dst$$reg),
10653               as_Register($src1$$reg),
10654               as_Register($src2$$reg),
10655               Assembler::LSL,
10656               $src3$$constant & 0x1f);
10657   %}
10658 
10659   ins_pipe(ialu_reg_reg_shift);
10660 %}
10661 
10662 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
10663                          iRegL src1, iRegL src2,
10664                          immI src3, rFlagsReg cr) %{
10665   match(Set dst (XorL src1 (LShiftL src2 src3)));
10666 
10667   ins_cost(1.9 * INSN_COST);
10668   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
10669 
10670   ins_encode %{
10671     __ eor(as_Register($dst$$reg),
10672               as_Register($src1$$reg),
10673               as_Register($src2$$reg),
10674               Assembler::LSL,
10675               $src3$$constant & 0x3f);
10676   %}
10677 
10678   ins_pipe(ialu_reg_reg_shift);
10679 %}
10680 
10681 instruct OrI_reg_URShift_reg(iRegINoSp dst,
10682                          iRegIorL2I src1, iRegIorL2I src2,
10683                          immI src3, rFlagsReg cr) %{
10684   match(Set dst (OrI src1 (URShiftI src2 src3)));
10685 
10686   ins_cost(1.9 * INSN_COST);
10687   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
10688 
10689   ins_encode %{
10690     __ orrw(as_Register($dst$$reg),
10691               as_Register($src1$$reg),
10692               as_Register($src2$$reg),
10693               Assembler::LSR,
10694               $src3$$constant & 0x1f);
10695   %}
10696 
10697   ins_pipe(ialu_reg_reg_shift);
10698 %}
10699 
10700 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
10701                          iRegL src1, iRegL src2,
10702                          immI src3, rFlagsReg cr) %{
10703   match(Set dst (OrL src1 (URShiftL src2 src3)));
10704 
10705   ins_cost(1.9 * INSN_COST);
10706   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
10707 
10708   ins_encode %{
10709     __ orr(as_Register($dst$$reg),
10710               as_Register($src1$$reg),
10711               as_Register($src2$$reg),
10712               Assembler::LSR,
10713               $src3$$constant & 0x3f);
10714   %}
10715 
10716   ins_pipe(ialu_reg_reg_shift);
10717 %}
10718 
10719 instruct OrI_reg_RShift_reg(iRegINoSp dst,
10720                          iRegIorL2I src1, iRegIorL2I src2,
10721                          immI src3, rFlagsReg cr) %{
10722   match(Set dst (OrI src1 (RShiftI src2 src3)));
10723 
10724   ins_cost(1.9 * INSN_COST);
10725   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
10726 
10727   ins_encode %{
10728     __ orrw(as_Register($dst$$reg),
10729               as_Register($src1$$reg),
10730               as_Register($src2$$reg),
10731               Assembler::ASR,
10732               $src3$$constant & 0x1f);
10733   %}
10734 
10735   ins_pipe(ialu_reg_reg_shift);
10736 %}
10737 
10738 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
10739                          iRegL src1, iRegL src2,
10740                          immI src3, rFlagsReg cr) %{
10741   match(Set dst (OrL src1 (RShiftL src2 src3)));
10742 
10743   ins_cost(1.9 * INSN_COST);
10744   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
10745 
10746   ins_encode %{
10747     __ orr(as_Register($dst$$reg),
10748               as_Register($src1$$reg),
10749               as_Register($src2$$reg),
10750               Assembler::ASR,
10751               $src3$$constant & 0x3f);
10752   %}
10753 
10754   ins_pipe(ialu_reg_reg_shift);
10755 %}
10756 
10757 instruct OrI_reg_LShift_reg(iRegINoSp dst,
10758                          iRegIorL2I src1, iRegIorL2I src2,
10759                          immI src3, rFlagsReg cr) %{
10760   match(Set dst (OrI src1 (LShiftI src2 src3)));
10761 
10762   ins_cost(1.9 * INSN_COST);
10763   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
10764 
10765   ins_encode %{
10766     __ orrw(as_Register($dst$$reg),
10767               as_Register($src1$$reg),
10768               as_Register($src2$$reg),
10769               Assembler::LSL,
10770               $src3$$constant & 0x1f);
10771   %}
10772 
10773   ins_pipe(ialu_reg_reg_shift);
10774 %}
10775 
10776 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
10777                          iRegL src1, iRegL src2,
10778                          immI src3, rFlagsReg cr) %{
10779   match(Set dst (OrL src1 (LShiftL src2 src3)));
10780 
10781   ins_cost(1.9 * INSN_COST);
10782   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
10783 
10784   ins_encode %{
10785     __ orr(as_Register($dst$$reg),
10786               as_Register($src1$$reg),
10787               as_Register($src2$$reg),
10788               Assembler::LSL,
10789               $src3$$constant & 0x3f);
10790   %}
10791 
10792   ins_pipe(ialu_reg_reg_shift);
10793 %}
10794 
// ---- Add/subtract with a constant-shifted second operand ----
// Each rule below folds (src2 <shift> src3) into the AArch64
// shifted-register form of add/sub, saving the separate shift
// instruction.  The shift immediate is masked to the register width
// (0x1f for 32-bit, 0x3f for 64-bit), matching Java shift semantics.
// ins_cost of 1.9 * INSN_COST makes this single fused instruction
// cheaper than the two-instruction shift-then-add/sub alternative.

// dst = src1 + (src2 >>> src3), 32-bit
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3), 64-bit
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3), 32-bit (arithmetic shift)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3), 64-bit (arithmetic shift)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3), 32-bit
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3), 64-bit
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3), 32-bit
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3), 64-bit
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3), 32-bit (arithmetic shift)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3), 64-bit (arithmetic shift)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 32-bit
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 64-bit
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11022 
11023 
11024 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (x << lshift) >> rshift is a signed bitfield extract: SBFM with
// immr = (rshift - lshift) & 63 and imms = 63 - lshift reproduces it
// in one instruction.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: SBFMW with the shift arithmetic done mod 32.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << lshift) >>> rshift becomes UBFM
// (zero-extending bitfield extract), same r/s computation as above.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask is 2^width - 1 (guaranteed by the
// immI_bitmask operand), is an unsigned bitfield extract: UBFXW.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask + 1 is a power of two, so width is the extracted field size.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant: (src >>> rshift) & mask -> UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, so it also covers
// the ConvI2L wrapped around the int extract.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11166 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift -> UBFIZW; the predicate checks the inserted
// field (width + lshift) stays within the 32-bit register.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant: (src & mask) << lshift -> UBFIZ.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // NOTE(review): the predicate uses exact_log2_long on this 64-bit
    // mask but the encoding uses exact_log2 — confirm exact_log2
    // accepts the full immL_bitmask range here.
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// ubfiz zero-extends the inserted field, so it subsumes the ConvI2L.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11224 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift a multiple
// of 64 (checked by the predicate) is an EXTR: high bits from src1,
// low bits from src2.  With src1 == src2 this is a rotate right.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11241 
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift a multiple of 32 becomes a single EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Format fixed to name the 32-bit form (extrw) actually emitted below,
  // matching the addw/subw convention used by the other 32-bit rules.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11256 
// Same EXTR pattern as extrOrL but with AddL combining the halves;
// since the shifted values share no set bits when the shifts sum to 64,
// add and or produce the same result.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11271 
// 32-bit variant of extrAddL: AddI combining non-overlapping shifted
// halves (shifts sum to a multiple of 32) becomes a single EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Format fixed to name the 32-bit form (extrw) actually emitted below,
  // matching the addw/subw convention used by the other 32-bit rules.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11286 
11287 
// rol expander
// AArch64 has no rotate-left; rol(x, s) == ror(x, -s), so negate the
// shift into rscratch1 and use RORV.  Two instructions.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant: negate the shift, then RORVW.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11319 
// Match the variable-rotate-left idiom (x << s) | (x >>> (c - s)).
// Both c == 64 and c == 0 reduce to the same result because shift
// amounts are taken mod 64; each form expands to the rol expander.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with c == 0: (0 - s) and (64 - s) agree mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with c == 32 (shift amounts mod 32).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with c == 0.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11355 
// ror expander
// Rotate right maps directly onto RORV — a single instruction, hence
// the lower cost than the rol expanders above.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant: RORVW.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11385 
// Match the variable-rotate-right idiom (x >>> s) | (x << (c - s)),
// mirroring the rol matchers above; expands to the ror expander.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with c == 0: (0 - s) and (64 - s) agree mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with c == 32.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with c == 0.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11421 
// Add/subtract (extended)
// AArch64 add/sub can sign- or zero-extend the second operand for
// free; the rules below fold explicit extension patterns into that
// operand form.

// dst = src1 + (long)src2: fold the ConvI2L into add's sxtw extend.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// dst = src1 - (long)src2: fold the ConvI2L into sub's sxtw extend.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11449 
11450 
// ---- Add with extension expressed as a shift pair ----
// (x << k) >> k sign-extends the low (width - k) bits; (x << k) >>> k
// zero-extends them.  The rules below fold those shift pairs into the
// extended-operand form of add: sxth/sxtb/sxtw or uxtb as appropriate.

// dst = src1 + (short)src2: (src2 << 16) >> 16 == sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (byte)src2: (src2 << 24) >> 24 == sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xff): (src2 << 24) >>> 24 == uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long variants: 64-bit shift pairs, so the shift constants are
// 48 (sxth), 32 (sxtw) and 56 (sxtb/uxtb).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (long)(int)src2 via the (<< 32) >> 32 pair.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (long)(byte)src2 via the (<< 56) >> 56 pair.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffL) via the (<< 56) >>> 56 pair.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11541 
11542 
// ---- Add/subtract with zero-extension expressed as an And mask ----
// src2 & 0xff == uxtb, & 0xffff == uxth, & 0xffffffffL == uxtw; fold
// the mask into the extended-operand form of add/sub.

// dst = src1 + (src2 & 0xff), 32-bit.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff), 32-bit.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffL), 64-bit.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffffL), 64-bit.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffffffffL), 64-bit.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xff), 32-bit.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff), 32-bit.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffL), 64-bit.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffffL), 64-bit.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffffffffL), 64-bit.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11672 
11673 
// ---- Add/subtract with shifted sign-extension (long) ----
// ((src2 << k) >> k) << lshift2: the inner shift pair is a sign
// extension (sxtb/sxth/sxtw for k = 56/48/32) and the outer left shift
// folds into the extended-operand form's shift amount (immIExt bounds
// lshift2 to what the instruction encoding allows).

// dst = src1 + ((long)(byte)src2 << lshift2)
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((long)(short)src2 << lshift2)
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((long)(int)src2 << lshift2)
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((long)(byte)src2 << lshift2)
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((long)(short)src2 << lshift2)
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((long)(int)src2 << lshift2)
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
11751 
11752 instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
11753 %{
11754   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
11755   ins_cost(1.9 * INSN_COST);
11756   format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}
11757 
11758    ins_encode %{
11759      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11760             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
11761    %}
11762   ins_pipe(ialu_reg_reg_shift);
11763 %}
11764 
11765 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
11766 %{
11767   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
11768   ins_cost(1.9 * INSN_COST);
11769   format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
11770 
11771    ins_encode %{
11772      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11773             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
11774    %}
11775   ins_pipe(ialu_reg_reg_shift);
11776 %}
11777 
11778 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
11779 %{
11780   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
11781   ins_cost(1.9 * INSN_COST);
11782   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
11783 
11784    ins_encode %{
11785      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
11786             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
11787    %}
11788   ins_pipe(ialu_reg_reg_shift);
11789 %}
11790 
11791 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
11792 %{
11793   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
11794   ins_cost(1.9 * INSN_COST);
11795   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
11796 
11797    ins_encode %{
11798      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
11799             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
11800    %}
11801   ins_pipe(ialu_reg_reg_shift);
11802 %}
11803 
11804 
11805 instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
11806 %{
11807   match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
11808   ins_cost(1.9 * INSN_COST);
11809   format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}
11810 
11811    ins_encode %{
11812      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11813             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
11814    %}
11815   ins_pipe(ialu_reg_reg_shift);
11816 %};
11817 
11818 instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
11819 %{
11820   match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
11821   ins_cost(1.9 * INSN_COST);
11822   format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}
11823 
11824    ins_encode %{
11825      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
11826             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
11827    %}
11828   ins_pipe(ialu_reg_reg_shift);
11829 %};
11830 
11831 
// NOTE(review): still inside the auto-generated section (see END marker
// below) -- hand edits here will be lost on regeneration.
//
// Add/subtract with a zero-extended, left-shifted register operand.  An
// AND with 0xff / 0xffff / 0xffffffff followed by a left shift is matched
// to the extended-register form of ADD/SUB (uxtb/uxth/uxtw #lshift).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit variants (addw/subw with uxtb/uxth).
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
11961 // END This section of the file is automatically generated. Do not edit --------------
11962 
11963 // ============================================================================
11964 // Floating Point Arithmetic Instructions
11965 
// Single-precision floating-point add.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point add.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision floating-point subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision floating-point multiply.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12055 
// src1 * src2 + src3
// Fused multiply-add; only matched when UseFMA is on (FmaF nodes are only
// generated for the Math.fma intrinsic).
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12089 
// -src1 * src2 + src3
// Two match rules because the negation may appear on either multiplicand;
// both forms are algebraically the same and map onto fmsub.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12125 
// -src1 * src2 - src3
// Negated fused multiply-add: the addend and one multiplicand are both
// negated, which maps onto fnmadd (= -(a*b) - c).
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12161 
// src1 * src2 - src3
// Fused multiply-subtract: (FmaF (NegF src3) (Binary src1 src2)) maps onto
// fnmsub (= a*b - c).  The previously declared immF0 zero operand was not
// referenced by the match rule or the encoding and has been removed.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12178 
// src1 * src2 - src3
// Double-precision fused multiply-subtract.  The previously declared
// immD0 zero operand was unused and has been removed.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // n.b. the assembler names the double-precision form fnmsub, not fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12196 
12197 
// Single-precision floating-point divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision floating-point divide (higher cost: longer latency).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12227 
// Single-precision floating-point negate.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to name the instruction actually emitted (fnegs, the
  // single-precision form), matching the fnegs() call below.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12241 
// Double-precision floating-point negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12255 
// Single-precision floating-point absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision floating-point absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12281 
// Double-precision floating-point square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Was fp_div_s: a double-precision sqrt belongs on the double-precision
  // divide/sqrt pipeline (the pipes were swapped with sqrtF_reg).
  ins_pipe(fp_div_d);
%}
12294 
// Single-precision floating-point square root.  Matched as
// (float)Math.sqrt((double)f): the F2D/D2F round trip is exact for any
// float, so it can be collapsed into a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Was fp_div_d: a single-precision sqrt belongs on the single-precision
  // divide/sqrt pipeline (the pipes were swapped with sqrtD_reg).
  ins_pipe(fp_div_s);
%}
12307 
12308 // ============================================================================
12309 // Logical Instructions
12310 
12311 // Integer Logical Instructions
12312 
12313 // And Instructions
12314 
12315 
// Bitwise AND of two ints.
// NOTE(review): an rFlagsReg cr operand is declared but the encoding uses
// andw, which does not write the flags -- presumably leftover; confirm
// whether it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12330 
// Bitwise AND of an int with a logical immediate.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed: the encoding emits andw (flags untouched), not the
  // flag-setting andsw it previously claimed.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12345 
// Or Instructions

// Bitwise OR of two ints.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of an int with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12377 
// Xor Instructions

// Bitwise XOR of two ints.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of an int with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12409 
12410 // Long Logical Instructions
12411 // TODO
12412 
// Bitwise AND of two longs.
// NOTE(review): rFlagsReg cr is declared but andr does not write the
// flags -- presumably leftover; confirm whether it can be dropped.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format annotation fixed: this is the 64-bit (long) form, not "# int".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12427 
// Bitwise AND of a long with a logical immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format annotation fixed: this is the 64-bit (long) form, not "# int".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12442 
12443 // Or Instructions
12444 
// Bitwise OR of two longs.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format annotation fixed: this is the 64-bit (long) form, not "# int".
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of a long with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format annotation fixed: this is the 64-bit (long) form, not "# int".
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12474 
12475 // Xor Instructions
12476 
// Bitwise XOR of two longs.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format annotation fixed: this is the 64-bit (long) form, not "# int".
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of a long with a logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format annotation fixed: this is the 64-bit (long) form, not "# int".
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12506 
// int -> long sign extension: sbfm with immr=0, imms=31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// (ConvI2L src) & 0xffffffff: the mask makes the sign extension dead, so
// zero-extend instead (ubfm 0,31 is the UXTW alias).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12532 
// long -> int truncation: a 32-bit register move keeps the low word and
// zeroes the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12545 
// int -> boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero + cset.
// Clobbers the flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0 (64-bit compare).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12581 
// double -> float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
12607 
// float -> int: fcvtzs (signed, round toward zero) into a 32-bit GPR.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: fcvtzs into a 64-bit GPR.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float: scvtf from a 32-bit GPR.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float: scvtf from a 64-bit GPR.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
12659 
// double -> int: fcvtzs (signed, round toward zero) into a 32-bit GPR.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: fcvtzs into a 64-bit GPR.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double: scvtf from a 32-bit GPR.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double: scvtf from a 64-bit GPR.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
12711 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These Move* patterns reinterpret raw bits between the integer and FP
// register files (or a stack slot) without any value conversion.

// Load the raw bits of a float stack slot into a 32-bit integer register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the raw bits of an int stack slot into a single-precision FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load the raw bits of a double stack slot into a 64-bit integer register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the raw bits of a long stack slot into a double-precision FP register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12785 
// Store the raw bits of a float register into an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store the raw bits of a 32-bit integer register into a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12821 
// Store the raw bits of a double register into a long stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order fixed: the encoding stores $src into the stack slot $dst,
  // so the debug format must read "strd $src, $dst" like the sibling
  // Move*_reg_stack patterns (it previously printed the operands swapped).
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12839 
// Store the raw bits of a 64-bit integer register into a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12857 
// Register-to-register bit moves between the FP and integer register files
// via fmov; no conversion, just a reinterpretation of the raw bits.

// float bits -> 32-bit integer register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// 32-bit integer bits -> single-precision FP register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> 64-bit integer register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// 64-bit integer bits -> double-precision FP register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
12929 
12930 // ============================================================================
12931 // clearing of an array
12932 
12933 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
12934 %{
12935   match(Set dummy (ClearArray cnt base));
12936   effect(USE_KILL cnt, USE_KILL base);
12937 
12938   ins_cost(4 * INSN_COST);
12939   format %{ "ClearArray $cnt, $base" %}
12940 
12941   ins_encode %{
12942     __ zero_words($base$$Register, $cnt$$Register);
12943   %}
12944 
12945   ins_pipe(pipe_class_memory);
12946 %}
12947 
12948 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
12949 %{
12950   predicate((u_int64_t)n->in(2)->get_long()
12951             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
12952   match(Set dummy (ClearArray cnt base));
12953   effect(USE_KILL base);
12954 
12955   ins_cost(4 * INSN_COST);
12956   format %{ "ClearArray $cnt, $base" %}
12957 
12958   ins_encode %{
12959     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
12960   %}
12961 
12962   ins_pipe(pipe_class_memory);
12963 %}
12964 
12965 // ============================================================================
12966 // Overflow Math Instructions
12967 
12968 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12969 %{
12970   match(Set cr (OverflowAddI op1 op2));
12971 
12972   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12973   ins_cost(INSN_COST);
12974   ins_encode %{
12975     __ cmnw($op1$$Register, $op2$$Register);
12976   %}
12977 
12978   ins_pipe(icmp_reg_reg);
12979 %}
12980 
12981 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12982 %{
12983   match(Set cr (OverflowAddI op1 op2));
12984 
12985   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12986   ins_cost(INSN_COST);
12987   ins_encode %{
12988     __ cmnw($op1$$Register, $op2$$constant);
12989   %}
12990 
12991   ins_pipe(icmp_reg_imm);
12992 %}
12993 
12994 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
12995 %{
12996   match(Set cr (OverflowAddL op1 op2));
12997 
12998   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12999   ins_cost(INSN_COST);
13000   ins_encode %{
13001     __ cmn($op1$$Register, $op2$$Register);
13002   %}
13003 
13004   ins_pipe(icmp_reg_reg);
13005 %}
13006 
13007 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13008 %{
13009   match(Set cr (OverflowAddL op1 op2));
13010 
13011   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13012   ins_cost(INSN_COST);
13013   ins_encode %{
13014     __ cmn($op1$$Register, $op2$$constant);
13015   %}
13016 
13017   ins_pipe(icmp_reg_imm);
13018 %}
13019 
13020 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13021 %{
13022   match(Set cr (OverflowSubI op1 op2));
13023 
13024   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13025   ins_cost(INSN_COST);
13026   ins_encode %{
13027     __ cmpw($op1$$Register, $op2$$Register);
13028   %}
13029 
13030   ins_pipe(icmp_reg_reg);
13031 %}
13032 
13033 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13034 %{
13035   match(Set cr (OverflowSubI op1 op2));
13036 
13037   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13038   ins_cost(INSN_COST);
13039   ins_encode %{
13040     __ cmpw($op1$$Register, $op2$$constant);
13041   %}
13042 
13043   ins_pipe(icmp_reg_imm);
13044 %}
13045 
13046 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13047 %{
13048   match(Set cr (OverflowSubL op1 op2));
13049 
13050   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13051   ins_cost(INSN_COST);
13052   ins_encode %{
13053     __ cmp($op1$$Register, $op2$$Register);
13054   %}
13055 
13056   ins_pipe(icmp_reg_reg);
13057 %}
13058 
13059 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13060 %{
13061   match(Set cr (OverflowSubL op1 op2));
13062 
13063   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13064   ins_cost(INSN_COST);
13065   ins_encode %{
13066     __ subs(zr, $op1$$Register, $op2$$constant);
13067   %}
13068 
13069   ins_pipe(icmp_reg_imm);
13070 %}
13071 
13072 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13073 %{
13074   match(Set cr (OverflowSubI zero op1));
13075 
13076   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13077   ins_cost(INSN_COST);
13078   ins_encode %{
13079     __ cmpw(zr, $op1$$Register);
13080   %}
13081 
13082   ins_pipe(icmp_reg_imm);
13083 %}
13084 
13085 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13086 %{
13087   match(Set cr (OverflowSubL zero op1));
13088 
13089   format %{ "cmp   zr, $op1\t# overflow check long" %}
13090   ins_cost(INSN_COST);
13091   ins_encode %{
13092     __ cmp(zr, $op1$$Register);
13093   %}
13094 
13095   ins_pipe(icmp_reg_imm);
13096 %}
13097 
// int multiply overflow check.  There is no multiply-and-set-flags insn, so
// compute the full 64-bit product, compare it against its own sign-extended
// low word (NE => high bits are not pure sign extension => overflow), then
// synthesize the V flag with a cselw/cmpw trick so generic cmpOp VS/VC
// branches and cmoves work.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused int multiply-overflow + branch: when the If only tests
// overflow/no_overflow (see predicate) we can skip the V-flag synthesis and
// branch directly on the NE/EQ outcome of the sign-extension compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// long multiply overflow check: compare the high 64 bits of the 128-bit
// product (smulh) against the sign of the low 64 bits (mul), then
// synthesize the V flag as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused long multiply-overflow + branch; same shortcut as the int variant.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13187 
13188 // ============================================================================
13189 // Compare Instructions
13190 
13191 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
13192 %{
13193   match(Set cr (CmpI op1 op2));
13194 
13195   effect(DEF cr, USE op1, USE op2);
13196 
13197   ins_cost(INSN_COST);
13198   format %{ "cmpw  $op1, $op2" %}
13199 
13200   ins_encode(aarch64_enc_cmpw(op1, op2));
13201 
13202   ins_pipe(icmp_reg_reg);
13203 %}
13204 
13205 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
13206 %{
13207   match(Set cr (CmpI op1 zero));
13208 
13209   effect(DEF cr, USE op1);
13210 
13211   ins_cost(INSN_COST);
13212   format %{ "cmpw $op1, 0" %}
13213 
13214   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13215 
13216   ins_pipe(icmp_reg_imm);
13217 %}
13218 
13219 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
13220 %{
13221   match(Set cr (CmpI op1 op2));
13222 
13223   effect(DEF cr, USE op1);
13224 
13225   ins_cost(INSN_COST);
13226   format %{ "cmpw  $op1, $op2" %}
13227 
13228   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13229 
13230   ins_pipe(icmp_reg_imm);
13231 %}
13232 
13233 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
13234 %{
13235   match(Set cr (CmpI op1 op2));
13236 
13237   effect(DEF cr, USE op1);
13238 
13239   ins_cost(INSN_COST * 2);
13240   format %{ "cmpw  $op1, $op2" %}
13241 
13242   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13243 
13244   ins_pipe(icmp_reg_imm);
13245 %}
13246 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// Unsigned int compare, register-register.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13306 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13362 
// Unsigned long compare, register-register (flags consumed via cmpOpU).
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13418 
// Pointer compare, register-register (unsigned semantics).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13474 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// float compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// float compare against the +0.0 immediate form of fcmp.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// double compare against the +0.0 immediate form of fcmp.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13536 
// Three-way float compare (CmpF3): produce -1 / 0 / +1 in an int register.
// fcmps sets the flags; csinvw yields 0 on EQ else -1; csnegw then keeps -1
// on LT (less or unordered) else negates it to +1.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare (CmpD3); same -1/0/+1 sequence as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0 using fcmp's zero-immediate form.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0 using fcmp's zero-immediate form.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
13644 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw produces 0/1 from the compare,
// then subtracting from zr turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates the
// sign bit, giving -1 for negative src and 0 otherwise in one instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13681 
13682 // ============================================================================
13683 // Max and Min
13684 
13685 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13686 %{
13687   match(Set dst (MinI src1 src2));
13688 
13689   effect(DEF dst, USE src1, USE src2, KILL cr);
13690   size(8);
13691 
13692   ins_cost(INSN_COST * 3);
13693   format %{
13694     "cmpw $src1 $src2\t signed int\n\t"
13695     "cselw $dst, $src1, $src2 lt\t"
13696   %}
13697 
13698   ins_encode %{
13699     __ cmpw(as_Register($src1$$reg),
13700             as_Register($src2$$reg));
13701     __ cselw(as_Register($dst$$reg),
13702              as_Register($src1$$reg),
13703              as_Register($src2$$reg),
13704              Assembler::LT);
13705   %}
13706 
13707   ins_pipe(ialu_reg_reg);
13708 %}
13709 // FROM HERE
13710 
13711 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13712 %{
13713   match(Set dst (MaxI src1 src2));
13714 
13715   effect(DEF dst, USE src1, USE src2, KILL cr);
13716   size(8);
13717 
13718   ins_cost(INSN_COST * 3);
13719   format %{
13720     "cmpw $src1 $src2\t signed int\n\t"
13721     "cselw $dst, $src1, $src2 gt\t"
13722   %}
13723 
13724   ins_encode %{
13725     __ cmpw(as_Register($src1$$reg),
13726             as_Register($src2$$reg));
13727     __ cselw(as_Register($dst$$reg),
13728              as_Register($src1$$reg),
13729              as_Register($src2$$reg),
13730              Assembler::GT);
13731   %}
13732 
13733   ins_pipe(ialu_reg_reg);
13734 %}
13735 
13736 // ============================================================================
13737 // Branch Instructions
13738 
13739 // Direct Branch.
13740 instruct branch(label lbl)
13741 %{
13742   match(Goto);
13743 
13744   effect(USE lbl);
13745 
13746   ins_cost(BRANCH_COST);
13747   format %{ "b  $lbl" %}
13748 
13749   ins_encode(aarch64_enc_b(lbl));
13750 
13751   ins_pipe(pipe_branch);
13752 %}
13753 
13754 // Conditional Near Branch
13755 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
13756 %{
13757   // Same match rule as `branchConFar'.
13758   match(If cmp cr);
13759 
13760   effect(USE lbl);
13761 
13762   ins_cost(BRANCH_COST);
13763   // If set to 1 this indicates that the current instruction is a
13764   // short variant of a long branch. This avoids using this
13765   // instruction in first-pass matching. It will then only be used in
13766   // the `Shorten_branches' pass.
13767   // ins_short_branch(1);
13768   format %{ "b$cmp  $lbl" %}
13769 
13770   ins_encode(aarch64_enc_br_con(cmp, lbl));
13771 
13772   ins_pipe(pipe_branch_cond);
13773 %}
13774 
13775 // Conditional Near Branch Unsigned
13776 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
13777 %{
13778   // Same match rule as `branchConFar'.
13779   match(If cmp cr);
13780 
13781   effect(USE lbl);
13782 
13783   ins_cost(BRANCH_COST);
13784   // If set to 1 this indicates that the current instruction is a
13785   // short variant of a long branch. This avoids using this
13786   // instruction in first-pass matching. It will then only be used in
13787   // the `Shorten_branches' pass.
13788   // ins_short_branch(1);
13789   format %{ "b$cmp  $lbl\t# unsigned" %}
13790 
13791   ins_encode(aarch64_enc_br_conU(cmp, lbl));
13792 
13793   ins_pipe(pipe_branch_cond);
13794 %}
13795 
// Make use of CBZ and CBNZ.  These instructions, as well as being
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.

// Fused int compare-against-zero + eq/ne branch via cbzw/cbnzw.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused long compare-against-zero + eq/ne branch via cbz/cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused pointer null-check + eq/ne branch via cbz/cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused compressed-pointer null-check + eq/ne branch via cbzw/cbnzw.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13867 
13868 instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
13869   match(If cmp (CmpP (DecodeN oop) zero));
13870   effect(USE labl);
13871 
13872   ins_cost(BRANCH_COST);
13873   format %{ "cb$cmp   $oop, $labl" %}
13874   ins_encode %{
13875     Label* L = $labl$$label;
13876     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13877     if (cond == Assembler::EQ)
13878       __ cbzw($oop$$Register, *L);
13879     else
13880       __ cbnzw($oop$$Register, *L);
13881   %}
13882   ins_pipe(pipe_cmp_branch);
13883 %}
13884 
13885 instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
13886   match(If cmp (CmpU op1 op2));
13887   effect(USE labl);
13888 
13889   ins_cost(BRANCH_COST);
13890   format %{ "cbw$cmp   $op1, $labl" %}
13891   ins_encode %{
13892     Label* L = $labl$$label;
13893     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13894     if (cond == Assembler::EQ || cond == Assembler::LS)
13895       __ cbzw($op1$$Register, *L);
13896     else
13897       __ cbnzw($op1$$Register, *L);
13898   %}
13899   ins_pipe(pipe_cmp_branch);
13900 %}
13901 
13902 instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
13903   match(If cmp (CmpUL op1 op2));
13904   effect(USE labl);
13905 
13906   ins_cost(BRANCH_COST);
13907   format %{ "cb$cmp   $op1, $labl" %}
13908   ins_encode %{
13909     Label* L = $labl$$label;
13910     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13911     if (cond == Assembler::EQ || cond == Assembler::LS)
13912       __ cbz($op1$$Register, *L);
13913     else
13914       __ cbnz($op1$$Register, *L);
13915   %}
13916   ins_pipe(pipe_cmp_branch);
13917 %}
13918 
13919 // Test bit and Branch
13920 
13921 // Patterns for short (< 32KiB) variants
// Sign test of a long: (op1 < 0) / (op1 >= 0) is exactly a test of
// bit 63, so emit tbnz (LT => sign set) or tbz (GE => sign clear) via
// MacroAssembler::tbr, which dispatches on NE/EQ respectively.
// Short variant: tbz/tbnz reach only +/-32KiB (see header comment).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// 32-bit sign test: bit 31 plays the role of the sign bit.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long: (op1 & (1 << k)) ==/!= 0 becomes tbz/tbnz
// on bit k. The predicate restricts the AND mask to a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // exact_log2 recovers the bit index from the power-of-two mask.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// 32-bit single-bit test, same scheme as cmpL_branch_bit.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
13987 
13988 // And far variants
// Far variants of the sign/bit-test branches above: identical matching,
// but tbr is called with far=true so out-of-range targets are reached
// (the short forms carry ins_short_branch(1); these do not and are the
// first-pass match).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far 32-bit sign test (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of a long (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of an int (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14050 
14051 // Test bits
14052 
// Set flags from (op1 & op2) vs zero for a 64-bit immediate mask,
// emitted as a single TST. The predicate admits only masks encodable
// in the AArch64 bitmask-immediate form.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14065 
// Set flags from (op1 & op2) vs zero for a 32-bit immediate mask,
// emitted as a single TSTW (the 32-bit alias of ANDS with a zero
// destination). The predicate admits only masks encodable in the
// AArch64 bitmask-immediate form.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Debug format now says "tstw" to match the instruction actually
  // emitted below (previously "tst", which is the 64-bit mnemonic and
  // was inconsistent with cmpI_and_reg's format).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14078 
// Register-register forms of the flag-setting AND test: no immediate
// encodability restriction is needed, so there is no predicate.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit register-register AND test (tstw).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14100 
14101 
14102 // Conditional Far Branch
14103 // Conditional Far Branch Unsigned
14104 // TODO: fixme
14105 
14106 // counted loop end branch near
// Conditional branch closing a counted loop (signed conditions); shares
// the B.cond encoding with branchCon but matches CountedLoopEnd so the
// matcher can distinguish loop back-edges.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Unsigned-condition counterpart of branchLoopEnd.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14139 
14140 // counted loop end branch far
14141 // counted loop end branch far unsigned
14142 // TODO: fixme
14143 
14144 // ============================================================================
14145 // inlined locking and unlocking
14146 
// Inlined monitor enter (biased/stack/inflated fast paths live in the
// aarch64_enc_fast_lock encoding). The outcome is communicated through
// the flags in cr; tmp and tmp2 are clobbered scratch registers.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit, mirror of cmpFastLock; result again reported
// via the flags, tmp/tmp2 clobbered.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14174 
14175 
14176 // ============================================================================
14177 // Safepoint Instructions
14178 
14179 // TODO
14180 // provide a near and far version of this code
14181 
// Safepoint poll: load from the polling page and discard the value
// (ldrw to zr). When a safepoint is pending the page is protected and
// the load traps; relocInfo::poll_type lets the signal handler
// recognize the faulting instruction as a poll.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14194 
14195 
14196 // ============================================================================
14197 // Procedure Call/Return Instructions
14198 
14199 // Call Java Static Instruction
14200 
// Direct call to a statically-bound Java method; the encoding also
// emits the call epilog (aarch64_enc_call_epilog).
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction
// Virtual/interface dispatch: the encoding emits the inline-cache call
// sequence, then the shared call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Call out of Java into the VM runtime (may safepoint; no call epilog
// is needed, the runtime transition handles state).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call: no safepoint and no Java-visible side effects;
// shares the java_to_runtime encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call that additionally does not touch floating-point
// state; same encoding as CallLeafDirect on this port.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14286 
14287 // Tail Call; Jump from runtime stub to Java code.
14288 // Also known as an 'interprocedural jump'.
14289 // Target of jump will eventually return to caller.
14290 // TailJump below removes the return address.
// Interprocedural jump into Java code (e.g. from a runtime stub): a
// plain `br' through jump_target. method_oop is a fixed register the
// callee expects to hold the method; it is matched but not encoded.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding: branches through
// jump_target with the exception oop pinned in r0; unlike TailCall the
// encoding also discards the return address (see aarch64_enc_tail_jmp).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14316 
14317 // Create exception oop: created by stack-crawling runtime code.
14318 // Created exception is now available to this handler, and is setup
14319 // just prior to jumping to this handler. No code emitted.
14320 // TODO check
14321 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Marker node: the exception oop is already in r0 when control reaches
// the handler (placed there by stack-crawling runtime code), so this
// emits nothing — it only binds the value to a fixed register for the
// register allocator.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14347 
14348 
14349 // Return Instruction
14350 // epilog node loads ret address into lr as part of frame pop
// Method return; lr already holds the return address (loaded by the
// epilog's frame pop), so a bare `ret' suffices.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now.
// Halt node: emit a trapping debug instruction. (`dpcs1' is HotSpot's
// spelling of this assembler helper; the operand is an arbitrary
// recognizable immediate.)
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
14377 
14378 // ============================================================================
14379 // Partial Subtype Check
14380 //
// Search the subklass's secondary-supers array for an instance of the superklass.  Set a hidden
14382 // internal cache on a hit (cache is checked with exposed code in
14383 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14384 // encoding ALSO sets flags.
14385 
// Partial subtype check with the result consumed as a value: fixed
// registers match the stub's calling convention; opcode 0x1 asks the
// encoding to zero the result register on a hit. Flags are clobbered.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when the check's result is only compared with zero:
// the flags set by the encoding are the real output, so the result
// register need not be zeroed (opcode 0x0) and is just clobbered.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14415 
// String.compareTo intrinsic, UTF-16 vs UTF-16 (UU). Fixed argument
// registers match MacroAssembler::string_compare; tmp2 is also killed
// despite the format mentioning only tmp1. No vector temps are needed
// for same-width comparisons (fnoreg placeholders).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 vs Latin-1 (LL) comparison; same register scheme as UU.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-width UTF-16 vs Latin-1 (UL) comparison: widening needs the
// vector temporaries v0-v2, all killed.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 vs UTF-16 (LU), mirror of string_compareUL.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14490 
// String.indexOf intrinsic with a runtime needle length, UTF-16 needle
// in UTF-16 haystack (UU). The -1 constant-count argument tells
// MacroAssembler::string_indexof the count is in cnt2, not a constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 needle in Latin-1 haystack (LL), runtime needle length.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 needle in UTF-16 haystack (UL), runtime needle length.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14553 
// String.indexOf with a compile-time-constant needle length (UU).
// The constant is passed as icnt2 and the runtime count register slot
// is zr; fewer temporaries are needed than for the general case.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length needle, Latin-1 in Latin-1 (LL); length <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length needle, Latin-1 in UTF-16 (UL); note this variant
// only matches a needle length of exactly 1 (immI_1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14616 
// indexOf(char) intrinsic: search a UTF-16 sequence for a single
// character, delegated to MacroAssembler::string_indexof_char.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14634 
// String.equals intrinsic, Latin-1 encoding: element size 1 byte is
// passed as the final argument to MacroAssembler::string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 variant: element size 2 bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
14666 
// Arrays.equals intrinsic for byte[] arrays (LL encoding).  Compares the
// two array oops in R1/R2 including length; result (0/1) lands in R0.
// tmp (R10) is clobbered outright; tmp1-tmp3 are scratch temps.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Final argument 1 = element size in bytes.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
14683 
// Arrays.equals intrinsic for char[] arrays (UU encoding).  Identical
// register contract to array_equalsB; element size is 2 bytes here.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Final argument 2 = element size in bytes (16-bit chars).
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
14700 
// StringCoding.hasNegatives intrinsic: scan a byte[] for any byte with the
// sign bit set (i.e. a non-ASCII byte).  ary1 (R1) and len (R2) are
// clobbered; boolean result lands in R0.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
14711 
14712 // fast char[] to byte[] compression
// Compact-strings compression: copy char[] src to byte[] dst, succeeding
// only if every char fits in one byte.  Result in R0 reports success/len.
// Uses SIMD temps V0-V3; src/dst/len fixed registers are clobbered.
// NOTE(review): the format text says "KILL R1, R2, R3, R4" but effect()
// kills R1-R3 plus V0-V3, not R4 — debug text only; confirm before relying.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
14730 
14731 // fast byte[] to char[] inflation
// Compact-strings inflation: widen byte[] src into char[] dst (no result
// value — 'dummy' satisfies the Set).  Fixed registers R0-R2 are clobbered.
// NOTE(review): the format mentions only $tmp1/$tmp2 although tmp3 (V2)
// and tmp4 (R3) are also TEMPs — debug text only; confirm if updating.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14745 
14746 // encode char[] to byte[] in ISO_8859_1
// ISO-8859-1 encoding intrinsic: narrow char[] src into byte[] dst.
// Result in R0 is the number of characters successfully encoded (stops at
// the first char > 0xFF).  SIMD temps V0-V3 and flags are clobbered.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14764 
14765 // ============================================================================
14766 // This name is KNOWN by the ADLC and cannot be changed.
14767 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14768 // for this guy.
// ThreadLocal: the current-thread pointer already lives in the dedicated
// thread register on AArch64, so this "load" emits no code at all
// (size(0), empty encoding) and costs nothing.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14783 
14784 // ====================VECTOR INSTRUCTIONS=====================================
14785 
14786 // Load vector (32 bits)
// Load a 32-bit vector into the low S lane of a D register (ldr s).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
14796 
14797 // Load vector (64 bits)
// Load a 64-bit vector into a D register (ldr d).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
14807 
14808 // Load Vector (128 bits)
// Load a 128-bit vector into a Q register (ldr q).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
14818 
14819 // Store Vector (32 bits)
// Store the low 32 bits of a vector register to memory (str s).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
14829 
14830 // Store Vector (64 bits)
// Store a 64-bit vector register to memory (str d).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
14840 
14841 // Store Vector (128 bits)
// Store a 128-bit vector register to memory (str q).
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
14851 
// Broadcast a GP register byte into all 8 (or 4) byte lanes of a D register.
// The length==4 case also matches here: DUP fills 8 lanes; extras are unused.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14864 
// Broadcast a GP register byte into all 16 byte lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14876 
// Broadcast an immediate byte constant into 8 byte lanes via MOVI;
// only the low 8 bits of the constant are used.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14889 
// Broadcast an immediate byte constant into 16 byte lanes via MOVI.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14901 
// Broadcast a GP register halfword into 4 (or 2) 16-bit lanes (DUP .4H).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14914 
// Broadcast a GP register halfword into 8 16-bit lanes (DUP .8H).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14926 
// Broadcast an immediate halfword constant into 4 (or 2) 16-bit lanes;
// only the low 16 bits of the constant are used.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14939 
// Broadcast an immediate halfword constant into 8 16-bit lanes.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14951 
// Broadcast a GP register word into 2 32-bit lanes (DUP .2S).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14963 
// Broadcast a GP register word into 4 32-bit lanes (DUP .4S).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14975 
// Broadcast an immediate word constant into 2 32-bit lanes.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14987 
// Broadcast an immediate word constant into 4 32-bit lanes.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14999 
// Broadcast a 64-bit GP register into both 64-bit lanes (DUP .2D).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15011 
// Zero a 128-bit vector register by XORing it with itself — cheaper than
// materializing the constant 0 and broadcasting it.
// NOTE(review): this rule matches (ReplicateI zero) with length()==2, not
// ReplicateL, and the format text says "movi" while the encoding emits EOR.
// Both look intentional-but-surprising — confirm against the ideal graph
// shapes C2 actually produces before changing either.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15025 
// Broadcast a scalar float (FP register) into 2 32-bit lanes.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15038 
// Broadcast a scalar float (FP register) into 4 32-bit lanes.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15051 
// Broadcast a scalar double (FP register) into both 64-bit lanes.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15064 
15065 // ====================REDUCTION ARITHMETIC====================================
15066 
// Integer add-reduction over a 2-lane vector: extract both S lanes to GP
// registers with UMOV, then fold them into the scalar accumulator src1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15085 
// Integer add-reduction over a 4-lane vector: ADDV sums all lanes into
// lane 0, UMOV moves it to a GP register, then add the scalar accumulator.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15103 
// Integer mul-reduction over a 2-lane vector: extract each S lane with
// UMOV and multiply it into the running scalar product starting at src1.
// dst is a TEMP too because it is written before the last input is read.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: dropped a stray trailing "\n\t" that left a dangling blank
  // continuation line in -XX:+PrintOptoAssembly output.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15122 
// Integer mul-reduction over a 4-lane vector: INS folds the high D half
// onto the low half, MULV multiplies lanes pairwise, then the two
// surviving S lanes are extracted and multiplied into the scalar src1.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Fixed: dropped a stray trailing "\n\t" that left a dangling blank
  // continuation line in -XX:+PrintOptoAssembly output.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15147 
// Float add-reduction over 2 lanes: add lane 0 to the scalar src1, shift
// lane 1 down with INS, and add it too.  Strictly ordered (no FADDP) to
// preserve Java's required left-to-right floating-point evaluation.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15167 
// Float add-reduction over 4 lanes: lanes are added to the accumulator one
// at a time (INS to move each lane into position, then scalar fadds) so
// the floating-point additions stay strictly ordered.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15199 
// Float mul-reduction over 2 lanes: multiply lane 0 into the scalar src1,
// shift lane 1 down with INS, multiply it in.  Strictly ordered to match
// Java floating-point semantics.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: format tail previously said "add reduction4f" — copy/paste
  // error; this is a 2-lane MUL reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15219 
// Float mul-reduction over 4 lanes: each lane is moved into position with
// INS and multiplied into the accumulator with scalar fmuls, keeping the
// floating-point multiplications strictly ordered.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: format tail previously said "add reduction4f" — copy/paste
  // error; this is a MUL reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15251 
// Double add-reduction over 2 lanes: add lane 0 to the scalar src1, shift
// lane 1 down with INS, and add it.  Strictly ordered FP evaluation.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15271 
// Double mul-reduction over 2 lanes: multiply lane 0 into the scalar src1,
// shift lane 1 down with INS, multiply it in.  Strictly ordered.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: format tail previously said "add reduction2d" — copy/paste
  // error; this is a MUL reduction.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15291 
15292 // ====================VECTOR ARITHMETIC=======================================
15293 
15294 // --------------------------------- ADD --------------------------------------
15295 
// Vector add, 8 (or 4) byte lanes in a D register (ADD .8B).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15310 
// Vector add, 16 byte lanes in a Q register (ADD .16B).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15324 
// Vector add, 4 (or 2) halfword lanes in a D register (ADD .4H).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15339 
// Vector add, 8 halfword lanes in a Q register (ADD .8H).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15353 
// Vector add, 2 word lanes in a D register (ADD .2S).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15367 
// Vector add, 4 word lanes in a Q register (ADD .4S).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15381 
// Vector add, 2 doubleword lanes in a Q register (ADD .2D).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15395 
// Vector float add, 2 lanes in a D register (FADD .2S).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15409 
// Vector float add, 4 lanes in a Q register (FADD .4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15423 
// Vector double add, 2 lanes in a Q register (FADD .2D).
// NOTE(review): unlike its siblings this rule has no length predicate —
// presumably AddVD only ever occurs with 2 lanes on this port; confirm.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15436 
15437 // --------------------------------- SUB --------------------------------------
15438 
// Vector subtract, 8 (or 4) byte lanes in a D register (SUB .8B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15453 
// Vector subtract, 16 byte lanes in a Q register (SUB .16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15467 
// Vector subtract, 4 (or 2) halfword lanes in a D register (SUB .4H).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15482 
// Vector subtract, 8 halfword lanes in a Q register (SUB .8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15496 
// Vector subtract, 2 word lanes in a D register (SUB .2S).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15510 
15511 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15512 %{
15513   predicate(n->as_Vector()->length() == 4);
15514   match(Set dst (SubVI src1 src2));
15515   ins_cost(INSN_COST);
15516   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15517   ins_encode %{
15518     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15519             as_FloatRegister($src1$$reg),
15520             as_FloatRegister($src2$$reg));
15521   %}
15522   ins_pipe(vdop128);
15523 %}
15524 
15525 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15526 %{
15527   predicate(n->as_Vector()->length() == 2);
15528   match(Set dst (SubVL src1 src2));
15529   ins_cost(INSN_COST);
15530   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15531   ins_encode %{
15532     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15533             as_FloatRegister($src1$$reg),
15534             as_FloatRegister($src2$$reg));
15535   %}
15536   ins_pipe(vdop128);
15537 %}
15538 
15539 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15540 %{
15541   predicate(n->as_Vector()->length() == 2);
15542   match(Set dst (SubVF src1 src2));
15543   ins_cost(INSN_COST);
15544   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15545   ins_encode %{
15546     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15547             as_FloatRegister($src1$$reg),
15548             as_FloatRegister($src2$$reg));
15549   %}
15550   ins_pipe(vdop_fp64);
15551 %}
15552 
15553 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15554 %{
15555   predicate(n->as_Vector()->length() == 4);
15556   match(Set dst (SubVF src1 src2));
15557   ins_cost(INSN_COST);
15558   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15559   ins_encode %{
15560     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15561             as_FloatRegister($src1$$reg),
15562             as_FloatRegister($src2$$reg));
15563   %}
15564   ins_pipe(vdop_fp128);
15565 %}
15566 
15567 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15568 %{
15569   predicate(n->as_Vector()->length() == 2);
15570   match(Set dst (SubVD src1 src2));
15571   ins_cost(INSN_COST);
15572   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15573   ins_encode %{
15574     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15575             as_FloatRegister($src1$$reg),
15576             as_FloatRegister($src2$$reg));
15577   %}
15578   ins_pipe(vdop_fp128);
15579 %}
15580 
15581 // --------------------------------- MUL --------------------------------------
15582 
// MulVS, 2 or 4 short lanes: both sizes share the 64-bit T4H form.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// MulVS, 8 short lanes: 128-bit T8H form.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// MulVI, 2 int lanes: 64-bit T2S form.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// MulVI, 4 int lanes: 128-bit T4S form.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// MulVF, 2 float lanes: FP multiply, 64-bit T2S form.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// MulVF, 4 float lanes: FP multiply, 128-bit T4S form.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// MulVD, 2 double lanes: FP multiply, 128-bit T2D form.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15681 
15682 // --------------------------------- MLA --------------------------------------
15683 
// Integer multiply-accumulate, 2 or 4 short lanes: matches dst = dst + src1*src2.
// Note dst appears on both sides of the match; mlav accumulates in place.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 8 short lanes (128-bit T8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, 2 int lanes (64-bit T2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 4 int lanes (128-bit T4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// FP fused multiply-add, 2 float lanes; only matched when UseFMA is enabled
// (fmla is a fused operation, so it is not a legal match for plain add+mul).
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
// FP fused multiply-add, 4 float lanes (UseFMA only).
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
// FP fused multiply-add, 2 double lanes (UseFMA only).
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15782 
15783 // --------------------------------- MLS --------------------------------------
15784 
// Integer multiply-subtract, 2 or 4 short lanes: dst = dst - src1*src2,
// accumulated in place by mlsv.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 8 short lanes (128-bit T8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, 2 int lanes (64-bit T2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 4 int lanes (128-bit T4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// FP fused multiply-subtract, 2 float lanes; either operand may carry the
// negation, hence the two match rules (UseFMA only).
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// FP fused multiply-subtract, 4 float lanes (UseFMA only).
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// FP fused multiply-subtract, 2 double lanes (UseFMA only).
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15886 
15887 // --------------------------------- DIV --------------------------------------
15888 
// DivVF, 2 float lanes: FP division, 64-bit T2S form.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// DivVF, 4 float lanes: FP division, 128-bit T4S form.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// DivVD, 2 double lanes: FP division, 128-bit T2D form.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15930 
15931 // --------------------------------- SQRT -------------------------------------
15932 
// SqrtVD, 2 double lanes: FP square root, 128-bit T2D form.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
15944 
15945 // --------------------------------- ABS --------------------------------------
15946 
// AbsVF, 2 float lanes: FP absolute value, 64-bit T2S form.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// AbsVF, 4 float lanes: FP absolute value, 128-bit T4S form.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// AbsVD, 2 double lanes: FP absolute value, 128-bit T2D form.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
15985 
15986 // --------------------------------- NEG --------------------------------------
15987 
// NegVF, 2 float lanes: FP negation, 64-bit T2S form.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// NegVF, 4 float lanes: FP negation, 128-bit T4S form.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// NegVD, 2 double lanes: FP negation, 128-bit T2D form.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16026 
16027 // --------------------------------- AND --------------------------------------
16028 
// AndV, 4- or 8-byte vectors: bitwise AND. Logical ops are lane-size
// agnostic, so the predicate keys on length_in_bytes rather than lane count.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AndV, 16-byte vectors: bitwise AND on a full 128-bit register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16057 
16058 // --------------------------------- OR ---------------------------------------
16059 
// OrV, 4- or 8-byte vectors: bitwise OR. Logical ops are lane-size
// agnostic, so the predicate keys on length_in_bytes rather than lane count.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Format mnemonic fixed from "and" to "orr": the encoding emits orr, and
  // this now matches the disassembly text used by vor16B below.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16074 
// OrV, 16-byte vectors: bitwise OR on a full 128-bit register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16088 
16089 // --------------------------------- XOR --------------------------------------
16090 
// XorV, 4- or 8-byte vectors: bitwise XOR (emitted as AArch64 eor).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// XorV, 16-byte vectors: bitwise XOR on a full 128-bit register.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16119 
16120 // ------------------------------ Shift ---------------------------------------
16121 
// LShiftCntV: broadcast a scalar left-shift count into every byte lane.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// RShiftCntV: broadcast the count, then negate every lane so the consuming
// sshl/ushl performs a right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16141 
// Variable byte shift, 4 or 8 lanes. Matches both LShiftVB and RShiftVB with
// a single sshl: per the note above, the right-shift count register holds a
// negated value (produced by vshiftcntR), which makes sshl shift right.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable byte shift, 16 lanes (see vsll8B for the shared LShift/RShift match).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable unsigned (logical) right byte shift, 4 or 8 lanes: ushl with a
// negated count register.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable unsigned (logical) right byte shift, 16 lanes.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16197 
// Immediate left byte shift, 4 or 8 lanes. Java allows shift counts >= the
// lane width; for bytes a count >= 8 shifts everything out, so emit
// eor(src, src) to produce zero instead of an illegal shl immediate.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left byte shift, 16 lanes (zeroes dst when the count >= 8).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right byte shift, 4 or 8 lanes. A count >= 8 is
// clamped to 7: shifting by lane_width-1 replicates the sign bit, which
// matches Java semantics for oversized signed shifts.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right byte shift, 16 lanes (count clamped to 7).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right byte shift, 4 or 8 lanes (zeroes dst when the
// count >= 8, since all bits are shifted out).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right byte shift, 16 lanes (zeroes dst when count >= 8).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16304 
// Variable short shift, 2 or 4 lanes. Like vsll8B, one sshl serves both
// left and signed-right shifts (right shifts use a negated count register).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable short shift, 8 lanes (128-bit T8H).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right short shift, 2 or 4 lanes: ushl with negated count.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right short shift, 8 lanes (128-bit T8H).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16360 
16361 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
16362   predicate(n->as_Vector()->length() == 2 ||
16363             n->as_Vector()->length() == 4);
16364   match(Set dst (LShiftVS src shift));
16365   ins_cost(INSN_COST);
16366   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
16367   ins_encode %{
16368     int sh = (int)$shift$$constant;
16369     if (sh >= 16) {
16370       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16371              as_FloatRegister($src$$reg),
16372              as_FloatRegister($src$$reg));
16373     } else {
16374       __ shl(as_FloatRegister($dst$$reg), __ T4H,
16375              as_FloatRegister($src$$reg), sh);
16376     }
16377   %}
16378   ins_pipe(vshift64_imm);
16379 %}
16380 
// Vector shift left by constant, eight 16-bit lanes (vecX).  As with the
// 4H form, a count >= 16 exceeds SHL's H-lane immediate range (0..15) and
// is emitted as EOR dst,src,src, producing all-zero lanes.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is zero (x ^ x).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16399 
// Vector arithmetic right shift by constant, 2 or 4 short lanes (vecD).
// Counts >= 16 are clamped to 15: an arithmetic shift saturates to
// all-sign-bits once the count reaches element_width - 1, so 15 yields the
// same result as any larger count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16414 
// Vector arithmetic right shift by constant, 8 short lanes (vecX).
// Counts >= 16 are clamped to 15, which already replicates the sign bit
// into every bit of the lane.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16428 
// Vector logical right shift by constant, 2 or 4 short lanes (vecD).
// A count >= 16 shifts out every bit, so it is emitted as EOR dst,src,src
// (zero) rather than an out-of-range USHR immediate.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is zero (x ^ x).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16448 
// Vector logical right shift by constant, 8 short lanes (vecX).
// A count >= 16 shifts out every bit, so it is emitted as EOR dst,src,src
// (zero) rather than an out-of-range USHR immediate.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is zero (x ^ x).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16467 
// Vector shift, two 32-bit lanes (vecD), variable per-lane shift.  SSHL
// shifts each lane left for positive shift-lane values and arithmetically
// right for negative ones, which is why this single rule matches both
// LShiftVI and RShiftVI.
// NOTE(review): the RShiftVI match relies on the shift operand lanes being
// negated upstream -- confirm against the shift-count expansion.
// The shift operand is a vecX (Q register) even for this 64-bit form.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16481 
// Vector shift, four 32-bit lanes (vecX), variable per-lane shift.  SSHL
// shifts left for positive shift lanes and arithmetically right for
// negative ones, so one rule covers both LShiftVI and RShiftVI.
// NOTE(review): the RShiftVI match relies on upstream negation of the
// shift lanes -- confirm against the shift-count expansion.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16495 
// Vector logical right shift, two 32-bit lanes (vecD), variable per-lane
// shift.  USHL performs a right shift only for negative shift-lane values.
// NOTE(review): assumes the URShiftVI shift operand is negated upstream --
// confirm; positive lanes would shift left instead.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16508 
// Vector logical right shift, four 32-bit lanes (vecX), variable per-lane
// shift.  USHL performs a right shift only for negative shift-lane values.
// NOTE(review): assumes the URShiftVI shift operand is negated upstream --
// confirm; positive lanes would shift left instead.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16521 
// Vector shift left by constant, two 32-bit lanes (vecD).
// Unlike the short forms, there is no count >= element-width guard here;
// presumably the ideal graph guarantees int shift counts in 0..31 --
// TODO(review) confirm.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
16534 
// Vector shift left by constant, four 32-bit lanes (vecX).  The shift
// count is passed straight through; no out-of-range guard (cf. the short
// forms, which must handle counts >= 16).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16547 
// Vector arithmetic right shift by constant, two 32-bit lanes (vecD).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
16560 
// Vector arithmetic right shift by constant, four 32-bit lanes (vecX).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16573 
// Vector logical right shift by constant, two 32-bit lanes (vecD).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
16586 
// Vector logical right shift by constant, four 32-bit lanes (vecX).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16599 
// Vector shift, two 64-bit lanes (vecX), variable per-lane shift.  SSHL
// shifts left for positive shift lanes and arithmetically right for
// negative ones, so one rule covers both LShiftVL and RShiftVL.
// NOTE(review): the RShiftVL match relies on upstream negation of the
// shift lanes -- confirm against the shift-count expansion.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16613 
// Vector logical right shift, two 64-bit lanes (vecX), variable per-lane
// shift.  USHL performs a right shift only for negative shift-lane values.
// NOTE(review): assumes the URShiftVL shift operand is negated upstream --
// confirm; positive lanes would shift left instead.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16626 
// Vector shift left by constant, two 64-bit lanes (vecX).  No
// out-of-range guard; presumably long shift counts are already in 0..63
// -- TODO(review) confirm.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16639 
// Vector arithmetic right shift by constant, two 64-bit lanes (vecX).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16652 
// Vector logical right shift by constant, two 64-bit lanes (vecX).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16665 
16666 //----------PEEPHOLE RULES-----------------------------------------------------
16667 // These must follow all instruction definitions as they use the names
16668 // defined in the instructions definitions.
16669 //
16670 // peepmatch ( root_instr_name [preceding_instruction]* );
16671 //
16672 // peepconstraint %{
16673 // (instruction_number.operand_name relational_op instruction_number.operand_name
16674 //  [, ...] );
16675 // // instruction numbers are zero-based using left to right order in peepmatch
16676 //
16677 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16678 // // provide an instruction_number.operand_name for each operand that appears
16679 // // in the replacement instruction's match rule
16680 //
16681 // ---------VM FLAGS---------------------------------------------------------
16682 //
16683 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16684 //
16685 // Each peephole rule is given an identifying number starting with zero and
16686 // increasing by one in the order seen by the parser.  An individual peephole
16687 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16688 // on the command-line.
16689 //
16690 // ---------CURRENT LIMITATIONS----------------------------------------------
16691 //
16692 // Only match adjacent instructions in same basic block
16693 // Only equality constraints
16694 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16695 // Only one replacement instruction
16696 //
16697 // ---------EXAMPLE----------------------------------------------------------
16698 //
16699 // // pertinent parts of existing instructions in architecture description
16700 // instruct movI(iRegINoSp dst, iRegI src)
16701 // %{
16702 //   match(Set dst (CopyI src));
16703 // %}
16704 //
16705 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16706 // %{
16707 //   match(Set dst (AddI dst src));
16708 //   effect(KILL cr);
16709 // %}
16710 //
16711 // // Change (inc mov) to lea
16712 // peephole %{
//   // increment preceded by register-register move
16714 //   peepmatch ( incI_iReg movI );
16715 //   // require that the destination register of the increment
16716 //   // match the destination register of the move
16717 //   peepconstraint ( 0.dst == 1.dst );
16718 //   // construct a replacement instruction that sets
16719 //   // the destination to ( move's source register + one )
16720 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16721 // %}
16722 //
16723 
16724 // Implementation no longer uses movX instructions since
16725 // machine-independent system no longer uses CopyX nodes.
16726 //
16727 // peephole
16728 // %{
16729 //   peepmatch (incI_iReg movI);
16730 //   peepconstraint (0.dst == 1.dst);
16731 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16732 // %}
16733 
16734 // peephole
16735 // %{
16736 //   peepmatch (decI_iReg movI);
16737 //   peepconstraint (0.dst == 1.dst);
16738 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16739 // %}
16740 
16741 // peephole
16742 // %{
16743 //   peepmatch (addI_iReg_imm movI);
16744 //   peepconstraint (0.dst == 1.dst);
16745 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16746 // %}
16747 
16748 // peephole
16749 // %{
16750 //   peepmatch (incL_iReg movL);
16751 //   peepconstraint (0.dst == 1.dst);
16752 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16753 // %}
16754 
16755 // peephole
16756 // %{
16757 //   peepmatch (decL_iReg movL);
16758 //   peepconstraint (0.dst == 1.dst);
16759 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16760 // %}
16761 
16762 // peephole
16763 // %{
16764 //   peepmatch (addL_iReg_imm movL);
16765 //   peepconstraint (0.dst == 1.dst);
16766 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16767 // %}
16768 
16769 // peephole
16770 // %{
16771 //   peepmatch (addP_iReg_imm movP);
16772 //   peepconstraint (0.dst == 1.dst);
16773 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16774 // %}
16775 
16776 // // Change load of spilled value to only a spill
16777 // instruct storeI(memory mem, iRegI src)
16778 // %{
16779 //   match(Set mem (StoreI mem src));
16780 // %}
16781 //
16782 // instruct loadI(iRegINoSp dst, memory mem)
16783 // %{
16784 //   match(Set dst (LoadI mem));
16785 // %}
16786 //
16787 
16788 //----------SMARTSPILL RULES---------------------------------------------------
16789 // These must follow all instruction definitions as they use the names
16790 // defined in the instructions definitions.
16791 
16792 // Local Variables:
16793 // mode: c++
16794 // End: