1 //
   2 // Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
// General Registers

// Each 64-bit general register is modelled as two 32-bit ADLC halves:
// the real low word Rn plus a virtual high word Rn_H that exists only
// for the register allocator's bookkeeping (see the note on defining
// 64 bit int registers above). r8 and r9 are deliberately not defined
// at all, keeping them invisible to the allocator for use as scratch
// registers.

reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: volatile (SOC) for Java use, but callee save (SOE) under
// the C calling convention (second column).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: reserved system registers, never allocated for Java (NS in
// the first column).
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each 128-bit FP/SIMD register vN is described as four 32-bit ADLC
  // slices: Vn (bits 0-31), Vn_H (32-63), Vn_J (64-95) and Vn_K
  // (96-127), obtained with as_VMReg()->next(k).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee save under the platform ABI but are declared SOC
  // here for Java use (see the note above the V register definitions).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16-v31 are SOC under the platform ABI as well.
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Pseudo register for the condition flags: no ideal register type and
// no concrete VMReg (VMRegImpl::Bad()); encoding 32 places it just
// past the 32 general registers.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the general registers: scratch volatiles first,
// then the Java argument registers, then the callee-saved registers,
// with the reserved system registers last (lowest priority, and in any
// case not allocatable — see their NS reg_defs above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the FP/SIMD registers: the no-save v16-v31
// first, then the argument registers v0-v7, then v8-v15 (callee save
// under the platform ABI) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

alloc_class chunk2(RFLAGS); // condition flags pseudo register only
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register. Includes the special
// registers r27-r30; use no_special_reg32 to avoid those. (r8/r9 are
// never defined as ADLC registers, so they cannot appear here.)
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP, modelled here as
// R31/R31_H)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers, i.e. excluding
// heapbase, thread, fp, lr and sp.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Same as no_special_reg32_no_fp but additionally allows r29 (fp) to
// be allocated.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Chooses between the two variants above according to the value of
// PreserveFramePointer (i.e. whether r29/fp may be allocated).
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers, i.e. excluding
// heapbase, thread, fp, lr and sp.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Same as no_special_reg_no_fp but additionally allows r29 (fp) to be
// allocated.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Chooses between the two variants above according to the value of
// PreserveFramePointer (i.e. whether r29/fp may be allocated).
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// The singleton classes below pin a value to one specific register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12, rmethod)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (includes the special registers
// r27-r31)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers, i.e. excluding
// heapbase, thread, fp, lr and sp
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers (single 32-bit slice per register,
// matching the scalar-float use described above)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (two 32-bit slices, Vn + Vn_H, per
// register)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers
//
// Membership is identical to double_reg above: a 64-bit (D-sized)
// vector occupies the same two 32-bit slots as a double.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// A 128-bit (Q-sized) vector needs four 32-bit slots per register:
// V<n> plus the virtual upper parts V<n>_H, V<n>_J and V<n>_K.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Per-register classes v0_reg .. v31_reg, used by rules that require a
// specific vector register.
// NOTE(review): the comments below say `128 bit register' but each
// class lists only the V<n>/V<n>_H slot pair (the 64-bit view); the
// _J/_K quadrants used by vectorx_reg are not included -- confirm this
// matches how these classes are consumed by the matching rules.
// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
1102 
1103 %}
1104 
1105 //----------DEFINITION BLOCK---------------------------------------------------
1106 // Define name --> value mappings to inform the ADLC of an integer valued name
1107 // Current support includes integer values in the range [0, 0x7FFFFFFF]
1108 // Format:
1109 //        int_def  <name>         ( <int_value>, <expression>);
1110 // Generated Code in ad_<arch>.hpp
1111 //        #define  <name>   (<expression>)
1112 //        // value == <int_value>
1113 // Generated code in ad_<arch>.cpp adlc_verification()
1114 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
1115 //
1116 
1117 // we follow the ppc-aix port in using a simple cost model which ranks
1118 // register operations as cheap, memory ops as more expensive and
1119 // branches as most expensive. the first two have a low as well as a
1120 // normal cost. huge cost appears to be a way of saying don't do
1121 // something
1122 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are far more expensive than plain memory ops.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
1130 
1131 
1132 //----------SOURCE BLOCK-------------------------------------------------------
1133 // This is a block of C++ code which provides values, functions, and
1134 // definitions necessary in the rest of the architecture description
1135 
1136 source_hpp %{
1137 
1138 #include "asm/macroAssembler.hpp"
1139 #include "gc/shared/cardTable.hpp"
1140 #include "gc/shared/cardTableBarrierSet.hpp"
1141 #include "gc/shared/collectedHeap.hpp"
1142 #include "opto/addnode.hpp"
1143 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 emits no call trampolines, so this is always zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1161 
// Sizing and emission hooks for the exception and deoptimization
// handler stubs expected by the shared compiler code.
class HandlerImpl {

 public:

  // Emit the handler code into cbuf; return its offset.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1178 
// Predicate helpers used by the matching rules to decide when acquire
// and release membars can be subsumed into ldar/stlr/ldaxr/stlxr
// instruction forms (definitions appear in the source block below).
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1200 %}
1201 
1202 source %{
1203 
  // Optimization of volatile gets and puts
1205   // -------------------------------------
1206   //
1207   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1208   // use to implement volatile reads and writes. For a volatile read
1209   // we simply need
1210   //
1211   //   ldar<x>
1212   //
1213   // and for a volatile write we need
1214   //
1215   //   stlr<x>
1216   //
1217   // Alternatively, we can implement them by pairing a normal
1218   // load/store with a memory barrier. For a volatile read we need
1219   //
1220   //   ldr<x>
1221   //   dmb ishld
1222   //
1223   // for a volatile write
1224   //
1225   //   dmb ish
1226   //   str<x>
1227   //   dmb ish
1228   //
1229   // We can also use ldaxr and stlxr to implement compare and swap CAS
1230   // sequences. These are normally translated to an instruction
1231   // sequence like the following
1232   //
1233   //   dmb      ish
1234   // retry:
1235   //   ldxr<x>   rval raddr
1236   //   cmp       rval rold
1237   //   b.ne done
1238   //   stlxr<x>  rval, rnew, rold
1239   //   cbnz      rval retry
1240   // done:
1241   //   cset      r0, eq
1242   //   dmb ishld
1243   //
1244   // Note that the exclusive store is already using an stlxr
1245   // instruction. That is required to ensure visibility to other
1246   // threads of the exclusive write (assuming it succeeds) before that
1247   // of any subsequent writes.
1248   //
1249   // The following instruction sequence is an improvement on the above
1250   //
1251   // retry:
1252   //   ldaxr<x>  rval raddr
1253   //   cmp       rval rold
1254   //   b.ne done
1255   //   stlxr<x>  rval, rnew, rold
1256   //   cbnz      rval retry
1257   // done:
1258   //   cset      r0, eq
1259   //
1260   // We don't need the leading dmb ish since the stlxr guarantees
1261   // visibility of prior writes in the case that the swap is
1262   // successful. Crucially we don't have to worry about the case where
1263   // the swap is not successful since no valid program should be
1264   // relying on visibility of prior changes by the attempting thread
1265   // in the case where the CAS fails.
1266   //
1267   // Similarly, we don't need the trailing dmb ishld if we substitute
1268   // an ldaxr instruction since that will provide all the guarantees we
1269   // require regarding observation of changes made by other threads
1270   // before any change to the CAS address observed by the load.
1271   //
1272   // In order to generate the desired instruction sequence we need to
1273   // be able to identify specific 'signature' ideal graph node
1274   // sequences which i) occur as a translation of a volatile reads or
1275   // writes or CAS operations and ii) do not occur through any other
1276   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1278   // sequences to the desired machine code sequences. Selection of the
1279   // alternative rules can be implemented by predicates which identify
1280   // the relevant node sequences.
1281   //
1282   // The ideal graph generator translates a volatile read to the node
1283   // sequence
1284   //
1285   //   LoadX[mo_acquire]
1286   //   MemBarAcquire
1287   //
1288   // As a special case when using the compressed oops optimization we
1289   // may also see this variant
1290   //
1291   //   LoadN[mo_acquire]
1292   //   DecodeN
1293   //   MemBarAcquire
1294   //
1295   // A volatile write is translated to the node sequence
1296   //
1297   //   MemBarRelease
1298   //   StoreX[mo_release] {CardMark}-optional
1299   //   MemBarVolatile
1300   //
1301   // n.b. the above node patterns are generated with a strict
1302   // 'signature' configuration of input and output dependencies (see
1303   // the predicates below for exact details). The card mark may be as
1304   // simple as a few extra nodes or, in a few GC configurations, may
1305   // include more complex control flow between the leading and
1306   // trailing memory barriers. However, whatever the card mark
1307   // configuration these signatures are unique to translated volatile
1308   // reads/stores -- they will not appear as a result of any other
1309   // bytecode translation or inlining nor as a consequence of
1310   // optimizing transforms.
1311   //
1312   // We also want to catch inlined unsafe volatile gets and puts and
1313   // be able to implement them using either ldar<x>/stlr<x> or some
1314   // combination of ldr<x>/stlr<x> and dmb instructions.
1315   //
1316   // Inlined unsafe volatiles puts manifest as a minor variant of the
1317   // normal volatile put node sequence containing an extra cpuorder
1318   // membar
1319   //
1320   //   MemBarRelease
1321   //   MemBarCPUOrder
1322   //   StoreX[mo_release] {CardMark}-optional
1323   //   MemBarCPUOrder
1324   //   MemBarVolatile
1325   //
1326   // n.b. as an aside, a cpuorder membar is not itself subject to
1327   // matching and translation by adlc rules.  However, the rule
1328   // predicates need to detect its presence in order to correctly
1329   // select the desired adlc rules.
1330   //
1331   // Inlined unsafe volatile gets manifest as a slightly different
1332   // node sequence to a normal volatile get because of the
1333   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1336   // present
1337   //
1338   //   MemBarCPUOrder
1339   //        ||       \\
1340   //   MemBarCPUOrder LoadX[mo_acquire]
1341   //        ||            |
1342   //        ||       {DecodeN} optional
1343   //        ||       /
1344   //     MemBarAcquire
1345   //
1346   // In this case the acquire membar does not directly depend on the
1347   // load. However, we can be sure that the load is generated from an
1348   // inlined unsafe volatile get if we see it dependent on this unique
1349   // sequence of membar nodes. Similarly, given an acquire membar we
1350   // can know that it was added because of an inlined unsafe volatile
1351   // get if it is fed and feeds a cpuorder membar and if its feed
1352   // membar also feeds an acquiring load.
1353   //
1354   // Finally an inlined (Unsafe) CAS operation is translated to the
1355   // following ideal graph
1356   //
1357   //   MemBarRelease
1358   //   MemBarCPUOrder
1359   //   CompareAndSwapX {CardMark}-optional
1360   //   MemBarCPUOrder
1361   //   MemBarAcquire
1362   //
1363   // So, where we can identify these volatile read and write
1364   // signatures we can choose to plant either of the above two code
1365   // sequences. For a volatile read we can simply plant a normal
1366   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1367   // also choose to inhibit translation of the MemBarAcquire and
1368   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1369   //
1370   // When we recognise a volatile store signature we can choose to
1371   // plant at a dmb ish as a translation for the MemBarRelease, a
1372   // normal str<x> and then a dmb ish for the MemBarVolatile.
1373   // Alternatively, we can inhibit translation of the MemBarRelease
1374   // and MemBarVolatile and instead plant a simple stlr<x>
1375   // instruction.
1376   //
1377   // when we recognise a CAS signature we can choose to plant a dmb
1378   // ish as a translation for the MemBarRelease, the conventional
1379   // macro-instruction sequence for the CompareAndSwap node (which
1380   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1381   // Alternatively, we can elide generation of the dmb instructions
1382   // and plant the alternative CompareAndSwap macro-instruction
1383   // sequence (which uses ldaxr<x>).
1384   //
1385   // Of course, the above only applies when we see these signature
1386   // configurations. We still want to plant dmb instructions in any
1387   // other cases where we may see a MemBarAcquire, MemBarRelease or
1388   // MemBarVolatile. For example, at the end of a constructor which
1389   // writes final/volatile fields we will see a MemBarRelease
1390   // instruction and this needs a 'dmb ish' lest we risk the
1391   // constructed object being visible without making the
1392   // final/volatile field writes visible.
1393   //
1394   // n.b. the translation rules below which rely on detection of the
1395   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1396   // If we see anything other than the signature configurations we
1397   // always just translate the loads and stores to ldr<x> and str<x>
1398   // and translate acquire, release and volatile membars to the
1399   // relevant dmb instructions.
1400   //
1401 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false.
  //
  // More precisely: plain CompareAndSwap, GetAndSet and GetAndAdd
  // opcodes are always treated as CAS operations, while the
  // CompareAndExchange and weak CompareAndSwap variants are only
  // treated as CAS operations when maybe_volatile is true.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These only count as CAS when volatile semantics may apply
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
1447 
1448   // helper to determine the maximum number of Phi nodes we may need to
1449   // traverse when searching from a card mark membar for the merge mem
1450   // feeding a trailing membar or vice versa
1451 
// predicates controlling emit of ldr<x>/ldar<x> and associated dmb

// Decide whether an acquire membar can be elided because the
// associated load (or CAS) will be emitted in an acquiring form
// (ldar<x>/ldaxr<x>) which subsumes the barrier.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // trailing membar of a volatile load: the ldar provides the acquire
  if (mb->trailing_load()) {
    return true;
  }

  // trailing membar of a load/store atomic: elidable only when the
  // preceding LoadStore is one of the recognised CAS opcodes
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1477 
1478 bool needs_acquiring_load(const Node *n)
1479 {
1480   assert(n->is_Load(), "expecting a load");
1481   if (UseBarriersForVolatile) {
1482     // we use a normal load and a dmb
1483     return false;
1484   }
1485 
1486   LoadNode *ld = n->as_Load();
1487 
1488   return ld->is_acquire();
1489 }
1490 
1491 bool unnecessary_release(const Node *n)
1492 {
1493   assert((n->is_MemBar() &&
1494           n->Opcode() == Op_MemBarRelease),
1495          "expecting a release membar");
1496 
1497   if (UseBarriersForVolatile) {
1498     // we need to plant a dmb
1499     return false;
1500   }
1501 
1502   MemBarNode *barrier = n->as_MemBar();
1503   if (!barrier->leading()) {
1504     return false;
1505   } else {
1506     Node* trailing = barrier->trailing_membar();
1507     MemBarNode* trailing_mb = trailing->as_MemBar();
1508     assert(trailing_mb->trailing(), "Not a trailing membar?");
1509     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1510 
1511     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1512     if (mem->is_Store()) {
1513       assert(mem->as_Store()->is_release(), "");
1514       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1515       return true;
1516     } else {
1517       assert(mem->is_LoadStore(), "");
1518       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1519       return is_CAS(mem->Opcode(), true);
1520     }
1521   }
1522   return false;
1523 }
1524 
// Decide whether a trailing MemBarVolatile can be elided: true when it
// trails a releasing store (the stlr<x> subsumes the barrier).
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // elidable only when this membar trails a volatile (releasing) store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // sanity-check the leading/trailing membar pairing
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1548 
1549 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1550 
1551 bool needs_releasing_store(const Node *n)
1552 {
1553   // assert n->is_Store();
1554   if (UseBarriersForVolatile) {
1555     // we use a normal store and dmb combination
1556     return false;
1557   }
1558 
1559   StoreNode *st = n->as_Store();
1560 
1561   return st->trailing_membar() != NULL;
1562 }
1563 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmb instructions are planted instead
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // a strong CAS/GetAndX node is always paired with a trailing membar
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // weak/exchange variants only need ldaxr when a trailing membar
    // indicates volatile semantics
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1585 
// predicate controlling translation of StoreCM
//
// returns true if the card write can be planted WITHOUT a preceding
// StoreStore barrier; returns false when a dmb ishst must precede it
// (CMS without conditional card marking)

bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we need to generate a dmb ishst between an object put and the
  // associated card mark when we are using CMS without conditional
  // card marking

  if (UseConcMarkSweepGC && !UseCondCardMark) {
    return false;
  }

  // a storestore is unnecessary in all other cases

  return true;
}
1607 
1608 
1609 #define __ _masm.
1610 
1611 // advance declarations for helper functions to convert register
1612 // indices to register objects
1613 
1614 // the ad file has to provide implementations of certain methods
1615 // expected by the generic code
1616 //
1617 // REQUIRED FUNCTIONALITY
1618 
1619 //=============================================================================
1620 
1621 // !!!!! Special hack to get all types of calls to specify the byte offset
1622 //       from the start of the call to the point where the return address
1623 //       will point.
1624 
1625 int MachCallStaticJavaNode::ret_addr_offset()
1626 {
1627   // call should be a simple bl
1628   int off = 4;
1629   return off;
1630 }
1631 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four instructions: the movz/movk/movk materialisation followed by
  // the bl itself
  return 16; // movz, movk, movk, bl
}
1636 
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blrt rscratch1
  // a non-NULL blob lookup means the target is a generated stub
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}
1653 
1654 // Indicate if the safepoint node needs the polling page as an input
1655 
1656 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1658 // instruction itself. so we cannot plant a mov of the safepoint poll
1659 // address followed by a load. setting this to true means the mov is
1660 // scheduled as a prior instruction. that's better for scheduling
1661 // anyway.
1662 
// Always provide the polling page address as a safepoint input so the
// address mov can be scheduled ahead of the poll load (rationale above).
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1667 
1668 //=============================================================================
1669 
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// A breakpoint is a single brk #0 instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1684 
1685 //=============================================================================
1686 
#ifndef PRODUCT
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
1692 
1693   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1694     MacroAssembler _masm(&cbuf);
1695     for (int i = 0; i < _count; i++) {
1696       __ nop();
1697     }
1698   }
1699 
  // Padding size is exactly _count machine instructions.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1703 
1704 //=============================================================================
// The constant base node produces no value in a register.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-register-allocation expansion is needed for the constant base.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1729 
#ifndef PRODUCT
// Pretty-print the frame setup sequence that emit() produces.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames use an immediate sub; large ones go via rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1751 
// Emit the method prologue: verified entry, frame setup, and constant
// table base bookkeeping.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // NOTE(review): frame building appears delegated to
  // MacroAssembler::verified_entry here -- confirm against the
  // assembler implementation
  __ verified_entry(C, 0);
  __ bind(*_verified_entry);

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1768 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// The prologue contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
1779 
1780 //=============================================================================
1781 
#ifndef PRODUCT
// Pretty-print the frame teardown and return poll that emit() produces.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // mirror the three frame-size cases of the prologue
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1807 
// Emit the method epilogue: frame teardown, optional simulator notify,
// reserved-stack check and return-point safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // NOTE(review): NotifySimulator suggests builtin-sim support in this
  // tree -- confirm the flag's semantics against the assembler
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // safepoint poll at method return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1827 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the generic pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1849 
1850 //=============================================================================
1851 
// Register classes a spill-copy endpoint can belong to: general
// register (rc_int), float/SIMD register (rc_float), or a stack slot
// (rc_stack).  rc_bad marks an absent (OptoReg::Bad) register.
enum RC { rc_bad, rc_int, rc_float, rc_stack };
1855 
1856 static enum RC rc_class(OptoReg::Name reg) {
1857 
1858   if (reg == OptoReg::Bad) {
1859     return rc_bad;
1860   }
1861 
1862   // we have 30 int registers * 2 halves
1863   // (rscratch1 and rscratch2 are omitted)
1864 
1865   if (reg < 60) {
1866     return rc_int;
1867   }
1868 
1869   // we have 32 float register * 2 halves
1870   if (reg < 60 + 128) {
1871     return rc_float;
1872   }
1873 
1874   // Between float regs & stack is the flags regs.
1875   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1876 
1877   return rc_stack;
1878 }
1879 
1880 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1881   Compile* C = ra_->C;
1882 
1883   // Get registers to move.
1884   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1885   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1886   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1887   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1888 
1889   enum RC src_hi_rc = rc_class(src_hi);
1890   enum RC src_lo_rc = rc_class(src_lo);
1891   enum RC dst_hi_rc = rc_class(dst_hi);
1892   enum RC dst_lo_rc = rc_class(dst_lo);
1893 
1894   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1895 
1896   if (src_hi != OptoReg::Bad) {
1897     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1898            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1899            "expected aligned-adjacent pairs");
1900   }
1901 
1902   if (src_lo == dst_lo && src_hi == dst_hi) {
1903     return 0;            // Self copy, no move.
1904   }
1905 
1906   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1907               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1908   int src_offset = ra_->reg2offset(src_lo);
1909   int dst_offset = ra_->reg2offset(dst_lo);
1910 
1911   if (bottom_type()->isa_vect() != NULL) {
1912     uint ireg = ideal_reg();
1913     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1914     if (cbuf) {
1915       MacroAssembler _masm(cbuf);
1916       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1917       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1918         // stack->stack
1919         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1920         if (ireg == Op_VecD) {
1921           __ unspill(rscratch1, true, src_offset);
1922           __ spill(rscratch1, true, dst_offset);
1923         } else {
1924           __ spill_copy128(src_offset, dst_offset);
1925         }
1926       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1927         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1928                ireg == Op_VecD ? __ T8B : __ T16B,
1929                as_FloatRegister(Matcher::_regEncode[src_lo]));
1930       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1931         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1932                        ireg == Op_VecD ? __ D : __ Q,
1933                        ra_->reg2offset(dst_lo));
1934       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1935         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1936                        ireg == Op_VecD ? __ D : __ Q,
1937                        ra_->reg2offset(src_lo));
1938       } else {
1939         ShouldNotReachHere();
1940       }
1941     }
1942   } else if (cbuf) {
1943     MacroAssembler _masm(cbuf);
1944     switch (src_lo_rc) {
1945     case rc_int:
1946       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1947         if (is64) {
1948             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1949                    as_Register(Matcher::_regEncode[src_lo]));
1950         } else {
1951             MacroAssembler _masm(cbuf);
1952             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1953                     as_Register(Matcher::_regEncode[src_lo]));
1954         }
1955       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1956         if (is64) {
1957             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1958                      as_Register(Matcher::_regEncode[src_lo]));
1959         } else {
1960             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1961                      as_Register(Matcher::_regEncode[src_lo]));
1962         }
1963       } else {                    // gpr --> stack spill
1964         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1965         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1966       }
1967       break;
1968     case rc_float:
1969       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1970         if (is64) {
1971             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1972                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1973         } else {
1974             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1975                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1976         }
1977       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1978           if (cbuf) {
1979             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1980                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1981         } else {
1982             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1983                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1984         }
1985       } else {                    // fpr --> stack spill
1986         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1987         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1988                  is64 ? __ D : __ S, dst_offset);
1989       }
1990       break;
1991     case rc_stack:
1992       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1993         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1994       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1995         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1996                    is64 ? __ D : __ S, src_offset);
1997       } else {                    // stack --> stack copy
1998         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1999         __ unspill(rscratch1, is64, src_offset);
2000         __ spill(rscratch1, is64, dst_offset);
2001       }
2002       break;
2003     default:
2004       assert(false, "bad rc_class for spill");
2005       ShouldNotReachHere();
2006     }
2007   }
2008 
2009   if (st) {
2010     st->print("spill ");
2011     if (src_lo_rc == rc_stack) {
2012       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
2013     } else {
2014       st->print("%s -> ", Matcher::regName[src_lo]);
2015     }
2016     if (dst_lo_rc == rc_stack) {
2017       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
2018     } else {
2019       st->print("%s", Matcher::regName[dst_lo]);
2020     }
2021     if (bottom_type()->isa_vect() != NULL) {
2022       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
2023     } else {
2024       st->print("\t# spill size = %d", is64 ? 64:32);
2025     }
2026   }
2027 
2028   return 0;
2029 
2030 }
2031 
2032 #ifndef PRODUCT
2033 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2034   if (!ra_)
2035     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
2036   else
2037     implementation(NULL, ra_, false, st);
2038 }
2039 #endif
2040 
2041 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2042   implementation(&cbuf, ra_, false, NULL);
2043 }
2044 
2045 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
2046   return MachNode::size(ra_);
2047 }
2048 
2049 //=============================================================================
2050 
2051 #ifndef PRODUCT
2052 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2053   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2054   int reg = ra_->get_reg_first(this);
2055   st->print("add %s, rsp, #%d]\t# box lock",
2056             Matcher::regName[reg], offset);
2057 }
2058 #endif
2059 
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // Materialize the stack address of the box lock: dst = sp + offset.
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // Offsets beyond the add-immediate range are not expected here;
    // size() below assumes a single 4-byte instruction.
    ShouldNotReachHere();
  }
}
2072 
2073 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2074   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2075   return 4;
2076 }
2077 
2078 ///=============================================================================
2079 #ifndef PRODUCT
2080 void MachVEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2081 {
2082   st->print_cr("# MachVEPNode");
2083   if (!_verified) {
2084     st->print_cr("\t load_class");
2085   } else {
2086     st->print_cr("\t unpack_value_arg");
2087   }
2088 }
2089 #endif
2090 
void MachVEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  MacroAssembler _masm(&cbuf);

  if (!_verified) {
    // Unverified entry: compare the receiver's klass against the
    // inline-cache expectation; on mismatch, tail-jump to the ic-miss
    // stub (rscratch1 is clobbered by cmp_klass).
    Label skip;
    __ cmp_klass(j_rarg0, rscratch2, rscratch1);
    __ br(Assembler::EQ, skip);
      __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ bind(skip);

  } else {
    // Unpack value type args passed as oop and then jump to
    // the verified entry point (skipping the unverified entry).
    __ unpack_value_args(ra_->C, _receiver_only);
    __ b(*_verified_entry);
  }
}
2109 
2110 
2111 uint MachVEPNode::size(PhaseRegAlloc* ra_) const
2112 {
2113   return MachNode::size(ra_); // too many variables; just compute it the hard way
2114 }
2115 
2116 
2117 //=============================================================================
2118 #ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  // Pretty-print the unverified entry point: load the receiver's klass,
  // compare against the inline-cache klass, branch to the miss stub on
  // mismatch.  Display only; emit() below is what generates code.
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    // NOTE(review): the printed operand is missing its opening '[';
    // cosmetic only — confirm before "fixing" the string.
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
    // NOTE(review): this branch handles the UNcompressed case but still
    // prints "# compressed klass" — looks like a copy/paste artifact.
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
2133 #endif
2134 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);
  Label skip;

  // Compare the receiver's klass (j_rarg0) against rscratch2 —
  // presumably the inline-cache klass; rscratch1 is a temp.
  // UseCompressedClassPointers logic are inside cmp_klass.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);

  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2150 
2151 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
2152 {
2153   return MachNode::size(ra_);
2154 }
2155 
2156 // REQUIRED EMIT CODE
2157 
2158 //=============================================================================
2159 
2160 // Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // Stub section could not grow; bail out the whole compilation.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Jump (possibly out of near-branch range) to the shared exception blob.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  // Offset of the handler within the stub section.
  return offset;
}
2179 
2180 // Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    // Stub section could not grow; bail out the whole compilation.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Materialize the current pc into lr before jumping, so the deopt
  // blob sees this handler's address as the return address.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  // Offset of the handler within the stub section.
  return offset;
}
2200 
2201 // REQUIRED MATCHER CODE
2202 
2203 //=============================================================================
2204 
2205 const bool Matcher::match_rule_supported(int opcode) {
2206 
2207   switch (opcode) {
2208   default:
2209     break;
2210   }
2211 
2212   if (!has_match_rule(opcode)) {
2213     return false;
2214   }
2215 
2216   return true;  // Per default match rules are supported.
2217 }
2218 
2219 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2220 
2221   // TODO
2222   // identify extra cases that we might want to provide match rules for
2223   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2224   bool ret_value = match_rule_supported(opcode);
2225   // Add rules here.
2226 
2227   return ret_value;  // Per default match rules are supported.
2228 }
2229 
2230 const bool Matcher::has_predicated_vectors(void) {
2231   return false;
2232 }
2233 
2234 const int Matcher::float_pressure(int default_pressure_threshold) {
2235   return default_pressure_threshold;
2236 }
2237 
2238 int Matcher::regnum_to_fpu_offset(int regnum)
2239 {
2240   Unimplemented();
2241   return 0;
2242 }
2243 
2244 // Is this branch offset short enough that a short branch can be used?
2245 //
2246 // NOTE: If the platform does not provide any short branch variants, then
2247 //       this method should return false for offset 0.
2248 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2249   // The passed offset is relative to address of the branch.
2250 
2251   return (-32768 <= offset && offset < 32768);
2252 }
2253 
2254 const bool Matcher::isSimpleConstant64(jlong value) {
2255   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
2256   // Probably always true, even if a temp register is required.
2257   return true;
2258 }
2259 
2260 // true just means we have fast l2f conversion
2261 const bool Matcher::convL2FSupported(void) {
2262   return true;
2263 }
2264 
2265 // Vector width in bytes.
2266 const int Matcher::vector_width_in_bytes(BasicType bt) {
2267   int size = MIN2(16,(int)MaxVectorSize);
2268   // Minimum 2 values in vector
2269   if (size < 2*type2aelembytes(bt)) size = 0;
2270   // But never < 4
2271   if (size < 4) size = 0;
2272   return size;
2273 }
2274 
2275 // Limits on vector size (number of elements) loaded into vector.
2276 const int Matcher::max_vector_size(const BasicType bt) {
2277   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2278 }
2279 const int Matcher::min_vector_size(const BasicType bt) {
2280 //  For the moment limit the vector size to 8 bytes
2281     int size = 8 / type2aelembytes(bt);
2282     if (size < 2) size = 2;
2283     return size;
2284 }
2285 
2286 // Vector ideal reg.
2287 const uint Matcher::vector_ideal_reg(int len) {
2288   switch(len) {
2289     case  8: return Op_VecD;
2290     case 16: return Op_VecX;
2291   }
2292   ShouldNotReachHere();
2293   return 0;
2294 }
2295 
2296 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2297   switch(size) {
2298     case  8: return Op_VecD;
2299     case 16: return Op_VecX;
2300   }
2301   ShouldNotReachHere();
2302   return 0;
2303 }
2304 
2305 // AES support not yet implemented
2306 const bool Matcher::pass_original_key_for_aes() {
2307   return false;
2308 }
2309 
2310 // aarch64 supports misaligned vectors store/load.
2311 const bool Matcher::misaligned_vectors_ok() {
2312   return true;
2313 }
2314 
// false => the array-initialization count is in elements and gets
// scaled to BytesPerLong, which is what this port expects.
const bool Matcher::init_array_count_is_in_bytes = false;
2317 
2318 // Use conditional move (CMOVL)
2319 const int Matcher::long_cmove_cost() {
2320   // long cmoves are no more expensive than int cmoves
2321   return 0;
2322 }
2323 
2324 const int Matcher::float_cmove_cost() {
2325   // float cmoves are no more expensive than int cmoves
2326   return 0;
2327 }
2328 
// Does the CPU require late expand (see block.cpp for description of
// late expand)?  false => no post-allocation expansion pass is needed.
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions, or does
// the cpu only look at the lower 5/6 bits anyway?  false => no
// explicit masking of shift counts is emitted.
const bool Matcher::need_masked_shift_count = false;
2335 
2336 // This affects two different things:
2337 //  - how Decode nodes are matched
2338 //  - how ImplicitNullCheck opportunities are recognized
2339 // If true, the matcher will try to remove all Decodes and match them
2340 // (as operands) into nodes. NullChecks are not prepared to deal with
2341 // Decodes by final_graph_reshaping().
2342 // If false, final_graph_reshaping() forces the decode behind the Cmp
2343 // for a NullCheck. The matcher matches the Decode node into a register.
2344 // Implicit_null_check optimization moves the Decode along with the
2345 // memory operation back up before the NullCheck.
2346 bool Matcher::narrow_oop_use_complex_address() {
2347   return CompressedOops::shift() == 0;
2348 }
2349 
2350 bool Matcher::narrow_klass_use_complex_address() {
2351 // TODO
2352 // decide whether we need to set this to true
2353   return false;
2354 }
2355 
2356 bool Matcher::const_oop_prefer_decode() {
2357   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
2358   return CompressedOops::base() == NULL;
2359 }
2360 
2361 bool Matcher::const_klass_prefer_decode() {
2362   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
2363   return CompressedKlassPointers::base() == NULL;
2364 }
2365 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
// false => do not rematerialize; load float constants instead.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// true => no split/fixup is required on this port.
const bool Matcher::misaligned_doubles_ok = true;
2378 
// Not used on AArch64 (note the Unimplemented() below).
2380 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
2381   Unimplemented();
2382 }
2383 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  false => no extra rounding code is
// generated for strictfp on this port.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2387 
2388 // Are floats converted to double when stored to stack during
2389 // deoptimization?
2390 bool Matcher::float_in_double() { return false; }
2391 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.  true => an int occupies a full 64-bit slot.
const bool Matcher::int_in_long = true;
2397 
2398 // Return whether or not this register is ever used as an argument.
2399 // This function is used on startup to build the trampoline stubs in
2400 // generateOptoStub.  Registers not mentioned will be killed by the VM
2401 // call in the trampoline, and arguments in those registers not be
2402 // available to the callee.
2403 bool Matcher::can_be_java_arg(int reg)
2404 {
2405   return
2406     reg ==  R0_num || reg == R0_H_num ||
2407     reg ==  R1_num || reg == R1_H_num ||
2408     reg ==  R2_num || reg == R2_H_num ||
2409     reg ==  R3_num || reg == R3_H_num ||
2410     reg ==  R4_num || reg == R4_H_num ||
2411     reg ==  R5_num || reg == R5_H_num ||
2412     reg ==  R6_num || reg == R6_H_num ||
2413     reg ==  R7_num || reg == R7_H_num ||
2414     reg ==  V0_num || reg == V0_H_num ||
2415     reg ==  V1_num || reg == V1_H_num ||
2416     reg ==  V2_num || reg == V2_H_num ||
2417     reg ==  V3_num || reg == V3_H_num ||
2418     reg ==  V4_num || reg == V4_H_num ||
2419     reg ==  V5_num || reg == V5_H_num ||
2420     reg ==  V6_num || reg == V6_H_num ||
2421     reg ==  V7_num || reg == V7_H_num;
2422 }
2423 
2424 bool Matcher::is_spillable_arg(int reg)
2425 {
2426   return can_be_java_arg(reg);
2427 }
2428 
2429 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
2430   return false;
2431 }
2432 
2433 RegMask Matcher::divI_proj_mask() {
2434   ShouldNotReachHere();
2435   return RegMask();
2436 }
2437 
2438 // Register for MODI projection of divmodI.
2439 RegMask Matcher::modI_proj_mask() {
2440   ShouldNotReachHere();
2441   return RegMask();
2442 }
2443 
2444 // Register for DIVL projection of divmodL.
2445 RegMask Matcher::divL_proj_mask() {
2446   ShouldNotReachHere();
2447   return RegMask();
2448 }
2449 
2450 // Register for MODL projection of divmodL.
2451 RegMask Matcher::modL_proj_mask() {
2452   ShouldNotReachHere();
2453   return RegMask();
2454 }
2455 
2456 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
2457   return FP_REG_mask();
2458 }
2459 
2460 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2461   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2462     Node* u = addp->fast_out(i);
2463     if (u->is_Mem()) {
2464       int opsize = u->as_Mem()->memory_size();
2465       assert(opsize > 0, "unexpected memory operand size");
2466       if (u->as_Mem()->memory_size() != (1<<shift)) {
2467         return false;
2468       }
2469     }
2470   }
2471   return true;
2472 }
2473 
// false => the matcher does not require ConvI2L nodes to carry refined
// type information for int values used in long contexts.
const bool Matcher::convi2l_type_required = false;
2475 
2476 // Should the Matcher clone shifts on addressing modes, expecting them
2477 // to be subsumed into complex addressing expressions or compute them
2478 // into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Clone base+offset and (base + (index << scale)) expressions into
  // each memory user's address operand, so they fold into AArch64
  // addressing modes instead of being computed into a register.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      // The scale must match the operand size of every memory user.
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      // Also fold the i2l conversion: the index can be used with a
      // sign-extending (sxtw) addressing mode.
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (base + i2l(index)): use a sign-extended register offset.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2516 
2517 void Compile::reshape_address(AddPNode* addp) {
2518 }
2519 
2520 // helper for encoding java_to_runtime calls on sim
2521 //
2522 // this is needed to compute the extra arguments required when
2523 // planting a call to the simulator blrt instruction. the TypeFunc
2524 // can be queried to identify the counts for integral, and floating
2525 // arguments and the return type
2526 
// Count the integral (gpcnt) and floating-point (fpcnt) arguments of
// tf and classify its return type (rtype), for planting a simulator
// blrt call.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain_cc();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so FP args also increment gps.
      // May be intentional (every arg counted as a slot?) — confirm
      // against the simulator's blrt calling convention.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Map the Java return type onto the simulator return-type codes;
  // everything that is not void/float/double is "integral".
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2561 
// Emit a volatile load/store via INSN, which on AArch64 only accepts a
// bare base-register address: any index, displacement, or scale is
// rejected by the guarantees.  SCRATCH is unused here — presumably
// kept for signature compatibility with call sites; confirm.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2570 
2571 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
2572 typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
2573 typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
2574                                   MacroAssembler::SIMD_RegVariant T, const Address &adr);
2575 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index came from an int: sign-extend (sxtw) while scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Base + displacement addressing; no index register.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Base + scaled register index; displacement must be zero.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2606 
  // Float-register variant of loadStore above; same opcode-driven
  // choice between sign-extended and shifted index modes.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Index came from an int: sign-extend (sxtw) while scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      // Base + displacement addressing; no index register.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Base + scaled register index; displacement must be zero.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2629 
  // SIMD/vector variant of loadStore; vector accesses only use plain
  // shifted (lsl) register indexes, never sign-extended ones.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      // Base + displacement addressing; no index register.
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      // Base + scaled register index; displacement must be zero.
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2641 
2642 %}
2643 
2644 
2645 
2646 //----------ENCODING BLOCK-----------------------------------------------------
2647 // This block specifies the encoding classes used by the compiler to
2648 // output byte streams.  Encoding classes are parameterized macros
2649 // used by Machine Instruction Nodes in order to generate the bit
2650 // encoding of the instruction.  Operands specify their base encoding
2651 // interface with the interface keyword.  There are currently
2652 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2653 // COND_INTER.  REG_INTER causes an operand to generate a function
2654 // which returns its register number when queried.  CONST_INTER causes
2655 // an operand to generate a function which returns the value of the
2656 // constant when queried.  MEMORY_INTER causes an operand to generate
2657 // four functions which return the Base Register, the Index Register,
2658 // the Scale Value, and the Offset Value of the operand when queried.
2659 // COND_INTER causes an operand to generate six functions which return
2660 // the encoding code (ie - encoding bits for the instruction)
2661 // associated with each basic boolean condition for a conditional
2662 // instruction.
2663 //
2664 // Instructions specify two basic values for encoding.  Again, a
2665 // function is available to check if the constant displacement is an
2666 // oop. They use the ins_encode keyword to specify their encoding
2667 // classes (which must be a sequence of enc_class names, and their
2668 // parameters, specified in the encoding block), and they use the
2669 // opcode keyword to specify, in order, their primary, secondary, and
2670 // tertiary opcode.  Only the opcode sections which a particular
2671 // instruction needs for encoding need to be specified.
2672 encode %{
2673   // Build emit functions for each basic byte or larger field in the
2674   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2675   // from C++ code in the enc_class source block.  Emit functions will
2676   // live in the main source block for now.  In future, we can
2677   // generalize this by adding a syntax that specifies the sizes of
2678   // fields in an order, so that the adlc can build the emit functions
2679   // automagically
2680 
  // Catch-all encoding: emits an "unimplemented" trap for any
  // instruction whose encoding has not been written yet.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2686 
2687   // BEGIN Non-volatile memory access
2688 
  // Non-volatile integer loads.  Each variant hands the resolved memory
  // operand components (base register, index, scale, displacement) to
  // loadStore(), which selects a legal AArch64 addressing mode for the
  // given MacroAssembler load routine.  Enc_class names are overloaded on
  // operand types (e.g. ldrb for both iRegI and iRegL destinations).

  // ldrsbw: load signed byte, sign-extended into a 32-bit register
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsb: load signed byte, sign-extended across the full register
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load unsigned byte into an int register
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load unsigned byte into a long register
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrshw: load signed halfword, sign-extended into a 32-bit register
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsh: load signed halfword, sign-extended across the full register
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load unsigned halfword into an int register
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load unsigned halfword into a long register
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word into an int register
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word (zero-extended) into a long register
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsw: load 32-bit word, sign-extended into a long register
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldr: load full 64-bit doubleword
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2760 
  // Non-volatile floating-point and SIMD/vector loads.  The vector
  // variants additionally pass the SIMD arrangement size (S = 32-bit,
  // D = 64-bit, Q = 128-bit) to select the register width.

  // ldrs: load single-precision float
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrd: load double-precision float
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector load, 32-bit (S) element into low part of a vecD register
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector load, 64-bit (D) into a vecD register
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector load, 128-bit (Q) into a vecX register
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2790 
  // Non-volatile integer stores.  The "*0" variants store the zero
  // register (zr) directly, avoiding the need to materialize a zero
  // constant in a scratch register.

  // strb: store low byte
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strb of constant zero (stores zr)
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strb of zero preceded by a StoreStore barrier, for ordered stores
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strh: store low halfword
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strh of constant zero (stores zr)
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strw: store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strw of constant zero (stores zr)
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // str: store 64-bit doubleword
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for str), so copy sp into rscratch2
    // first and store that instead
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // str of constant zero (stores zr)
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2853 
  // Non-volatile floating-point and SIMD/vector stores, mirroring the
  // load variants above (S/D/Q select the SIMD register width).

  // strs: store single-precision float
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strd: store double-precision float
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector store, 32-bit (S) from a vecD register
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector store, 64-bit (D) from a vecD register
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector store, 128-bit (Q) from a vecX register
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2883 
2884   // END Non-volatile memory access
2885 
2886   // volatile loads and stores
2887 
  // Store-release encodings (stlrb/stlrh/stlrw).  MOV_VOLATILE resolves
  // the memory operand into a single register address (using rscratch1
  // for any lea that is needed) and emits the given release-store insn.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release 32-bit word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
2902 
2903 
  // Load-acquire encodings.  AArch64 has no sign-extending acquire
  // loads, so the signed variants emit ldarb/ldarh followed by an
  // explicit sign-extension.  NOTE(review): the bare __ calls below rely
  // on MOV_VOLATILE declaring a local _masm in the enclosing scope --
  // confirm against the macro's definition earlier in this file.

  // load-acquire signed byte, sign-extended to 32 bits
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire signed byte, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire unsigned byte into an int register
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire unsigned byte into a long register
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire signed halfword, sign-extended to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire signed halfword, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire unsigned halfword into an int register
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire unsigned halfword into a long register
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word into an int register
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 32-bit word into a long register
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // load-acquire float: acquire-load the bits into rscratch1, then
  // move them into the FP register (no FP form of ldar exists)
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire double via rscratch1, as for fldars
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2978 
  // store-release 64-bit doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for stlr), so copy sp into rscratch2
    // first and release-store that instead
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the FP bits into rscratch2 first (there is
  // no FP form of stlr), then release-store the bits as a word.  The
  // inner braces scope the temporary _masm so it does not clash with the
  // one MOV_VOLATILE introduces.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double via rscratch2, as for fstlrs
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
3012 
3013   // synchronized read/update encodings
3014 
  // Load-acquire-exclusive (ldaxr) for synchronized read/update.
  // ldaxr only accepts a base-register address, so any displacement
  // and/or scaled index is first folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // disp and index both present: compute base+disp, then add the
        // scaled index on top
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3043 
  // Store-release-exclusive (stlxr), the paired writeback for ldaxr.
  // As above, the address is folded into a single register (rscratch2
  // here, since rscratch1 receives the store status).  The final cmpw
  // sets flags from the status word (0 = success) for the matching rule.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // disp and index both present: base+disp first, then scaled index
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3073 
  // Compare-and-swap encodings (relaxed acquire).  All four sizes
  // delegate to MacroAssembler::cmpxchg with acquire=false,
  // release=true, strong semantics, discarding the previous value
  // (noreg).  The memory operand must be a bare base register.

  // 64-bit CAS
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3105 
3106 
3107   // The only difference between aarch64_enc_cmpxchg and
3108   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
3109   // CompareAndSwap sequence to serve as a barrier on acquiring a
3110   // lock.
  // 64-bit CAS with acquire (load-acquire in the CAS sequence)
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS with acquire
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS with acquire
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS with acquire
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3142 
3143   // auxiliary used for CompareAndSwapX to set result register
  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if the preceding compare set EQ (CAS succeeded), else 0
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3149 
3150   // prefetch encodings
3151 
  // Prefetch for write (PSTL1KEEP: prefetch for store, L1, temporal).
  // prfm cannot combine displacement with a scaled index, so that case
  // folds base+disp into rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3170 
  // mov encodings
3172 
  // move 32-bit immediate into an int register; zero goes through zr
  // to get the canonical zeroing form
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // move 64-bit immediate into a long register; zero goes through zr
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3194 
  // move a pointer constant, choosing the emission by its relocation
  // type: oops and metadata need relocatable immediates; plain
  // addresses use a direct mov when small, otherwise adrp+add.
  // NULL and 1 are handled by dedicated encodings (mov_p0 / mov_p1)
  // and must not reach here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // small enough to fit a mov immediate
          __ mov(dst_reg, con);
        } else {
          // page-relative addressing: adrp gives the page, add the offset
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3219 
  // move pointer constant NULL (encoded as zr)
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // move the polling page address via a poll-type-relocated adrp;
  // the page is page-aligned, so the adrp low-bits offset must be 0
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // move the card-table byte map base address
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
3245 
  // move a narrow (compressed) oop constant; NULL uses mov_n0 below
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // move narrow oop constant NULL (encoded as zr)
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move a narrow (compressed) klass constant
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3277 
3278   // arithmetic encodings
3279 
  // 32-bit add/subtract immediate.  A single encoding serves both ops:
  // the rule's primary opcode selects subtract by negating the constant,
  // and a negative effective constant is emitted as the opposite insn
  // with a positive immediate (AArch64 add/sub immediates are unsigned).
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract immediate, same scheme as above (the operand
  // constraint keeps the constant within the 32-bit cast's range)
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3307 
  // Integer division/remainder.  corrected_idiv[lq] implements Java
  // semantics (MIN_VALUE / -1) on top of the hardware divide; the
  // boolean argument selects remainder (true) vs quotient (false).

  // 32-bit divide
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide (NOTE(review): signature says iRegI but the emitted
  // corrected_idivq operates on 64-bit values -- matches upstream usage
  // for long division; confirm against the instruct rules that use it)
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3339 
3340   // compare instruction encodings
3341 
  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate: emitted as a
  // flags-only subs/adds against zr, negating when the constant is
  // negative so the unsigned immediate field can encode it
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1, then compare
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate, negating a
  // negative constant as in the 32-bit variant
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: it is its own negation
      // (val == -val), so materialize it in rscratch1 and compare
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare (full 64-bit)
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow (compressed) oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // pointer null test (compare against zr)
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // narrow oop null test (32-bit compare against zr)
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3423 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch; the condition code comes from the cmpOp
  // operand's cmpcode encoding
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch for unsigned comparisons; identical emission,
  // the cmpOpU operand supplies the unsigned condition codes
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3441 
  // Slow-path subtype check (secondary-supers scan).  On a hit the
  // slow path falls through; $primary selects the variant that also
  // zeroes the result register on success.  On a miss control reaches
  // the bound label with result_reg/flags left for the matching rule.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3459 
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      // Java method call: choose the relocation type so that the call site
      // can later be patched/resolved appropriately.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        // No room for the to-interpreter stub: record the failure and bail.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    // trampoline_call returns NULL when there was no room to emit the
    // trampoline; record the failure and bail out.
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3486 
3487   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3488     MacroAssembler _masm(&cbuf);
3489     int method_index = resolved_method_index(cbuf);
3490     address call = __ ic_call((address)$meth$$method, method_index);
3491     if (call == NULL) {
3492       ciEnv::current()->record_failure("CodeCache is full");
3493       return;
3494     }
3495   %}
3496 
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      // (stack-depth verification is not implemented for AArch64 yet,
      // hence the call to an Unimplemented trap).
      __ call_Unimplemented();
    }
  %}
3504 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache, so a (trampolined) direct call works.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: query the call signature so blrt
      // knows the integer/float argument register counts and return type.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // (a (zr, return-address) pair pushed below sp; popped again after
      // the call by the add below).
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3535 
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    // Jump (not call) to the shared rethrow stub; the stub may be anywhere
    // in the code cache, hence far_jump rather than a plain branch.
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3540 
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    // Return to the address held in the link register.
    __ ret(lr);
  %}
3545 
3546   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3547     MacroAssembler _masm(&cbuf);
3548     Register target_reg = as_Register($jump_target$$reg);
3549     __ br(target_reg);
3550   %}
3551 
3552   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
3553     MacroAssembler _masm(&cbuf);
3554     Register target_reg = as_Register($jump_target$$reg);
3555     // exception oop should be in r0
3556     // ret addr has been popped into lr
3557     // callee expects it in r3
3558     __ mov(r3, lr);
3559     __ br(target_reg);
3560   %}
3561 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor enter for a C2 FastLock node.
    // On exit the flags report the outcome: EQ = locked, NE = must take
    // the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Set tmp to be (markOop of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markOop.
    // On failure disp_hdr contains the possibly locked markOop.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded we found an unlocked object
    // and have now locked it; execution continues at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result

    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
    __ mov(tmp, (address)markOopDesc::unused_mark());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3639 
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit for a C2 FastUnlock node.
    // On exit the flags report the outcome: EQ = unlocked, NE = must take
    // the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    // We own the monitor and there are no recursions: check whether any
    // thread is waiting to enter.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3699 
3700 %}
3701 
3702 //----------FRAME--------------------------------------------------------------
3703 // Definition of frame structure and management information.
3704 //
3705 //  S T A C K   L A Y O U T    Allocators stack-slot number
3706 //                             |   (to get allocators register number
3707 //  G  Owned by    |        |  v    add OptoReg::stack0())
3708 //  r   CALLER     |        |
3709 //  o     |        +--------+      pad to even-align allocators stack-slot
3710 //  w     V        |  pad0  |        numbers; owned by CALLER
3711 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3712 //  h     ^        |   in   |  5
3713 //        |        |  args  |  4   Holes in incoming args owned by SELF
3714 //  |     |        |        |  3
3715 //  |     |        +--------+
3716 //  V     |        | old out|      Empty on Intel, window on Sparc
3717 //        |    old |preserve|      Must be even aligned.
3718 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3719 //        |        |   in   |  3   area for Intel ret address
3720 //     Owned by    |preserve|      Empty on Sparc.
3721 //       SELF      +--------+
3722 //        |        |  pad2  |  2   pad to align old SP
3723 //        |        +--------+  1
3724 //        |        | locks  |  0
3725 //        |        +--------+----> OptoReg::stack0(), even aligned
3726 //        |        |  pad1  | 11   pad to align new SP
3727 //        |        +--------+
3728 //        |        |        | 10
3729 //        |        | spills |  9   spills
3730 //        V        |        |  8   (pad0 slot for callee)
3731 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3732 //        ^        |  out   |  7
3733 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3734 //     Owned by    +--------+
3735 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3736 //        |    new |preserve|      Must be even-aligned.
3737 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3738 //        |        |        |
3739 //
3740 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3741 //         known from SELF's arguments and the Java calling convention.
3742 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3750 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3751 //         even aligned with pad0 as needed.
3752 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3753 //           (the latter is true on Intel but is it false on AArch64?)
3754 //         region 6-11 is even aligned; it may be padded out more so that
3755 //         the region from SP to FP meets the minimum stack alignment.
3756 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3757 //         alignment.  Region 11, pad1, may be dynamically extended so that
3758 //         SP meets the minimum alignment.
3759 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 is expected to name the stack pointer in this
  // register file -- confirm against the register definition block.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    // (the (void) cast makes discarding the return value explicit)
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half return register for each ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-half return register (OptoReg::Bad for 32-bit values).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3863 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// (INSN_COST is a constant defined elsewhere in the AD sources.)
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3881 
3882 //----------OPERANDS-----------------------------------------------------------
3883 // Operand definitions must precede instruction definitions for correct parsing
3884 // in the ADLC because operands constitute user defined types which are used in
3885 // instruction definitions.
3886 
3887 //----------Simple Operands----------------------------------------------------
3888 
// Integer operands 32 bit
// 32 bit immediate
// (any 32 bit integer constant; no predicate)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3932 
// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4063 
// the 64 bit constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits: non-zero, value+1 is a
// power of two, and the top two bits are clear.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits: non-zero, value+1 is a
// power of two, and the top two bits are clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4117 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- 64 bit constant variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4171 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte scaled access (scale shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte scaled access (scale shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte scaled access (scale shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte scaled access (scale shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte scaled access (scale shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte scaled access (scale shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4252 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
// (any 64 bit integer constant; no predicate)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4361 
// Pointer operands
// Pointer Immediate
// (any pointer constant; no predicate)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment mirrors immP_M1's; presumably -2 is a distinct
// thread-anchor marker value -- confirm against the users of this operand.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4443 
// Float and Double operands
// Double Immediate
// Matches any double constant (ConD).
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  // Compare the raw bits rather than the value so that -0.0d
  // (sign bit set) does not match.
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an FP "packed" (fmov) immediate.
// NOTE(review): the previous header comment read "constant 'double +0.0'",
// which did not describe this predicate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
// Matches any float constant (ConF).
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  // Bit comparison, so -0.0f does not match.
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as an FP "packed" (fmov) immediate.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4504 
// Narrow pointer operands
// Narrow Pointer Immediate
// Matches any compressed-oop constant (ConN).
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) klass pointer constant.
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4535 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // NOTE(review): unlike the sibling register operands, this one declares
  // no op_cost(0) — confirm whether relying on the ADLC default is intended.
  format %{ %}
  interface(REG_INTER);
%}
4579 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The following operands pin a pointer value to one specific register;
// they are used where a calling or runtime convention demands a fixed
// register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4696 
// Fixed-register long operands, for rules that need a long value in a
// specific register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4751 
// Fixed-register 32-bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4796 
4797 
// Narrow (compressed oop) Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R0.
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R2.
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R3.
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// NOTE(review): the previous comment here said "Integer 64 bit Register",
// which described the wrong operand — this matches RegN in a 32-bit class.
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4857 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand.
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register operand.
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4901 
// Fixed-register double operands, one per FP/SIMD register v0..v31, for
// rules and runtime stubs that need a double in a specific register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5189 
5190 // Flags register, used as output of signed compare instructions
5191 
5192 // note that on AArch64 we also use this register as the output for
5193 // for floating point compare instructions (CmpF CmpD). this ensures
5194 // that ordered inequality tests use GT, GE, LT or LE none of which
5195 // pass through cases where the result is unordered i.e. one or both
5196 // inputs to the compare is a NaN. this means that the ideal code can
5197 // replace e.g. a GT with an LE and not end up capturing the NaN case
5198 // (where the comparison should always fail). EQ and NE tests are
5199 // always generated in ideal code so that unordered folds into the NE
5200 // case, matching the behaviour of AArch64 NE.
5201 //
5202 // This differs from x86 where the outputs of FP compares use a
5203 // special FP flags registers and where compares based on this
5204 // register are distinguished into ordered inequalities (cmpOpUCF) and
5205 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
5206 // to explicitly handle the unordered case in branches. x86 also has
5207 // to include extra CMoveX rules to accept a cmpOpUCF input.
5208 
// Condition-flags register for signed (and FP — see comment above) compares.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5229 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously said
                                       // "link_reg" — copy-paste from lr_RegP)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link register (LR) operand.
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5271 
5272 //----------Memory Operands----------------------------------------------------
5273 
// Plain register-indirect addressing: [base].
// In the MEMORY_INTER blocks below, index(0xffffffff) is the ADLC
// convention for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (sign-extended int index) << scale]. The predicate admits the
// shift only when every memory use of the AddP tolerates that scaling.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)], same size_fits_all_mem_uses guard.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + sign-extended int index], no scaling.
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + long index], no scaling.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + int displacement]; the immIOffset* variants below restrict the
// displacement to the alignment required by 4-, 8- and 16-byte accesses.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base + long displacement], with aligned immLoffset* variants below.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5457 
// Narrow-oop addressing forms. Each requires CompressedOops::shift() == 0,
// i.e. the compressed value decodes without shifting, so the narrow register
// can be used directly as (part of) the address.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5562 
5563 
5564 
5565 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread register + immL_pc_off]: the PC slot of the current thread's
// JavaFrameAnchor (see the comment above).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5579 
5580 //----------Special Memory Operands--------------------------------------------
5581 // Stack Slot Operand - This operand is used for loading and storing temporary
5582 //                      values on the stack where a match requires a value to
5583 //                      flow through memory.
// Stack-slot operands: SP-relative accesses for values the matcher spills
// through memory. base(0x1e) is the hard-coded encoding this file uses for
// the stack pointer; disp($reg) carries the slot's stack offset.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5654 
5655 // Operands for expressing Control Flow
5656 // NOTE: Label is a predefined operand which should not be redefined in
5657 //       the AD file. It is generically handled within the ADLC.
5658 
5659 //----------Conditional Branch Operands----------------------------------------
5660 // Comparison Op  - This is the operation of the comparison, and is limited to
5661 //                  the following set of codes:
5662 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5663 //
5664 // Other attributes of the comparison, such as unsignedness, are specified
5665 // by the comparison instruction that sets a condition code flags register.
5666 // That result is represented by a flags operand whose subtype is appropriate
5667 // to the unsignedness (etc.) of the comparison.
5668 //
5669 // Later, the instruction which matches both the Comparison Op (a Bool) and
5670 // the flags (produced by the Cmp) specifies the coding of the comparison op
5671 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5672 
5673 // used for signed integral comparisons and fp comparisons
5674 
5675 operand cmpOp()
5676 %{
5677   match(Bool);
5678 
5679   format %{ "" %}
5680   interface(COND_INTER) %{
5681     equal(0x0, "eq");
5682     not_equal(0x1, "ne");
5683     less(0xb, "lt");
5684     greater_equal(0xa, "ge");
5685     less_equal(0xd, "le");
5686     greater(0xc, "gt");
5687     overflow(0x6, "vs");
5688     no_overflow(0x7, "vc");
5689   %}
5690 %}
5691 
5692 // used for unsigned integral comparisons
5693 
5694 operand cmpOpU()
5695 %{
5696   match(Bool);
5697 
5698   format %{ "" %}
5699   interface(COND_INTER) %{
5700     equal(0x0, "eq");
5701     not_equal(0x1, "ne");
5702     less(0x3, "lo");
5703     greater_equal(0x2, "hs");
5704     less_equal(0x9, "ls");
5705     greater(0x8, "hi");
5706     overflow(0x6, "vs");
5707     no_overflow(0x7, "vc");
5708   %}
5709 %}
5710 
5711 // used for certain integral comparisons which can be
5712 // converted to cbxx or tbxx instructions
5713 
5714 operand cmpOpEqNe()
5715 %{
5716   match(Bool);
5717   match(CmpOp);
5718   op_cost(0);
5719   predicate(n->as_Bool()->_test._test == BoolTest::ne
5720             || n->as_Bool()->_test._test == BoolTest::eq);
5721 
5722   format %{ "" %}
5723   interface(COND_INTER) %{
5724     equal(0x0, "eq");
5725     not_equal(0x1, "ne");
5726     less(0xb, "lt");
5727     greater_equal(0xa, "ge");
5728     less_equal(0xd, "le");
5729     greater(0xc, "gt");
5730     overflow(0x6, "vs");
5731     no_overflow(0x7, "vc");
5732   %}
5733 %}
5734 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
//
// The predicate restricts this operand to lt/ge Bool tests (sign-bit
// tests, which map onto tbz/tbnz on bit 31/63).

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5759 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
//
// Accepts eq/ne/lt/ge Bool tests only; note the interface still lists
// the signed condition encodings (the cb/tb forms do not consult them).

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5786 
// Special operand allowing long args to int ops to be truncated for free
//
// Matches (ConvL2I reg) with zero cost so the register allocator can
// feed the low 32 bits of a long register straight into a 32-bit
// instruction without emitting an explicit truncation.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5799 
// Vector memory operand classes: addressing modes legal for 4-, 8- and
// 16-byte vector loads/stores (offsets must be scaled to the access size).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5803 
5804 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
5806 // instruction definitions by not requiring the AD writer to specify
5807 // separate instructions for every form of operand when the
5808 // instruction accepts multiple operand types with the same basic
5809 // encoding and format. The classic case of this is memory operands.
5810 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address. The
// second row lists the narrow-oop (compressed pointer) variants.

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
5816 
5817 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5818 // operations. it allows the src to be either an iRegI or a (ConvL2I
5819 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5820 // can be elided because the 32-bit instruction will just employ the
5821 // lower 32 bits anyway.
5822 //
5823 // n.b. this does not elide all L2I conversions. if the truncated
5824 // value is consumed by more than one operation then the ConvL2I
5825 // cannot be bundled into the consuming nodes so an l2i gets planted
5826 // (actually a movw $dst $src) and the downstream instructions consume
5827 // the result of the l2i as an iRegI input. That's a shame since the
5828 // movw is actually redundant but its not too costly.
5829 
// Combined operand class: a plain 32-bit register or an elided l2i result.
opclass iRegIorL2I(iRegI, iRegL2I);
5831 
5832 //----------PIPELINE-----------------------------------------------------------
5833 // Rules which define the behavior of the target architectures pipeline.
5834 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Aliases mapping the A53-style stage names (issue, execute 1/2,
// writeback) onto the generic S0..S3 stages declared in pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5841 
5842 // Integer ALU reg operation
5843 pipeline %{
5844 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5857 
5858 // We don't use an actual pipeline model so don't care about resources
5859 // or description. we do use pipeline classes to introduce fixed
5860 // latencies
5861 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// (A53-style): two issue slots (INS0/INS1), two integer ALUs,
// multiply-accumulate, divide, branch, load/store, and the NEON/FP unit.

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5872 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5878 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP scalar two-operand op (single precision), eg. FADD s0, s1, s2
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP scalar two-operand op (double precision)
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP scalar unary op (single precision), eg. FNEG s0, s1
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP scalar unary op (double precision)
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP precision narrowing, double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP precision widening, float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP -> integer conversion, float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP -> integer conversion, float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5956 
// Integer -> FP conversion, int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Integer -> FP conversion, long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP -> integer conversion, double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP -> integer conversion, double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Integer -> FP conversion, int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Integer -> FP conversion, long -> double
// NOTE(review): src is declared iRegIorL2I, unlike fp_l2f which uses
// iRegL — presumably harmless since pipe_class operands only describe
// scheduling, but worth confirming against the ins_pipe users.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6010 
// FP divide, single precision; can only issue in slot 0 (INS0)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; can only issue in slot 0 (INS0)
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision; also reads the flags
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision; also reads the flags
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision (no source operands)
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load from the constant pool, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load from the constant pool, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6084 
// Vector multiply, 64-bit (D) vectors
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (X) vectors; slot 0 only
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit vectors.
// dst appears both as S5(write) and S1(read): the accumulator is a
// source as well as the destination.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit vectors; slot 0 only
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6126 
// Vector integer two-operand op (eg. add), 64-bit vectors
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer two-operand op, 128-bit vectors; slot 0 only
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op (and/or/xor), 64-bit vectors
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit vectors; slot 0 only
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6166 
// Vector shift by register, 64-bit vectors (shift counts live in a vecX)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit vectors; slot 0 only
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit vectors (immediate needs no read stage)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit vectors; slot 0 only
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6204 
// Vector FP two-operand op (eg. fadd), 64-bit vectors
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP two-operand op, 128-bit vectors; slot 0 only
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit vectors; slot 0 only
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit vectors; slot 0 only
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6244 
// Vector FP square root, 128-bit vectors; slot 0 only
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op (eg. fneg), 64-bit vectors
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit vectors; slot 0 only
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6271 
// Duplicate a general register into every lane, 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into every lane, 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into every lane, 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into every lane, 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into both lanes of a 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 64-bit vector
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 128-bit vector; slot 0 only
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
6332 
// Vector load from memory, 64-bit vector
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load from memory, 128-bit vector
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store to memory, 64-bit vector
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6359 
// Vector store to memory, 128-bit vector.
// Fix: the source operand is a 128-bit vector, so it is declared vecX
// (was vecD), consistent with vload_reg_mem128 and the other *128
// pipeline classes. pipe_class operand types are descriptive only, so
// ins_pipe users are unaffected.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6368 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is claimed
// in EX1 — looks inconsistent with the comment; confirm against the
// A53 optimization guide before changing.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}
6434 
// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6466 
//------- Compare operation -------------------------------

// Compare reg-reg; writes the flags register in EX2
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate; writes the flags register in EX2
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6493 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6531 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32-bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32-bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6584 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6610 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6644 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6678 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch; reads the flags register
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6707 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6731 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
6766 
// Define the class for the Nop node: nops consume no pipeline resources.
define %{
   MachNop = pipe_class_empty;
%}
6771 
6772 %}
6773 //----------INSTRUCTIONS-------------------------------------------------------
6774 //
6775 // match      -- States which machine-independent subtree may be replaced
6776 //               by this instruction.
6777 // ins_cost   -- The estimated cost of this instruction is used by instruction
6778 //               selection to identify a minimum cost tree of machine
6779 //               instructions that matches a tree of machine-independent
6780 //               instructions.
6781 // format     -- A string providing the disassembly for this instruction.
6782 //               The value of an instruction's operand may be inserted
6783 //               by referring to it with a '$' prefix.
6784 // opcode     -- Three instruction opcodes may be provided.  These are referred
6785 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6787 //               indicate the type of machine instruction, while secondary
6788 //               and tertiary are often used for prefix options or addressing
6789 //               modes.
6790 // ins_encode -- A list of encode classes with parameters. The encode class
6791 //               name must have been defined in an 'enc_class' specification
6792 //               in the encode section of the architecture description.
6793 
6794 // ============================================================================
6795 // Memory (Load/Store) Instructions
6796 
6797 // Load Instructions
6798 
// Load Byte (8 bit signed)
// The predicate excludes acquiring loads (volatile); those are matched
// by separate acquire rules.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// The ConvI2L is folded into the 64-bit sign-extending load.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6826 
// Load Byte (8 bit unsigned); ldrb zero-extends
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
// The ConvI2L is free: ldrb already zero-extends to 64 bits.
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6854 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
// The ConvI2L is folded into the 64-bit sign-extending load.
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6882 
// Load Char (16 bit unsigned); ldrh zero-extends
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
// The ConvI2L is free: ldrh already zero-extends to 64 bits.
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6910 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// The ConvI2L is folded into the sign-extending ldrsw.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6938 
// Load Integer (32 bit unsigned) into long
// Matches (AndL (ConvI2L (LoadI)) 0xFFFFFFFF): ldrw zero-extends, so
// both the conversion and the mask come for free.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6952 
// Load Long (64 bit signed)
// Fix: disassembly annotation said "# int" although this is the 64-bit
// long load; corrected to "# long" to match the instruction's width
// (every other load rule's annotation names its actual type).
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6966 
6967 // Load Range
6968 instruct loadRange(iRegINoSp dst, memory mem)
6969 %{
6970   match(Set dst (LoadRange mem));
6971 
6972   ins_cost(4 * INSN_COST);
6973   format %{ "ldrw  $dst, $mem\t# range" %}
6974 
6975   ins_encode(aarch64_enc_ldrw(dst, mem));
6976 
6977   ins_pipe(iload_reg_mem);
6978 %}
6979 
6980 // Load Pointer
6981 instruct loadP(iRegPNoSp dst, memory mem)
6982 %{
6983   match(Set dst (LoadP mem));
6984   predicate(!needs_acquiring_load(n));
6985 
6986   ins_cost(4 * INSN_COST);
6987   format %{ "ldr  $dst, $mem\t# ptr" %}
6988 
6989   ins_encode(aarch64_enc_ldr(dst, mem));
6990 
6991   ins_pipe(iload_reg_mem);
6992 %}
6993 
6994 // Load Compressed Pointer
6995 instruct loadN(iRegNNoSp dst, memory mem)
6996 %{
6997   match(Set dst (LoadN mem));
6998   predicate(!needs_acquiring_load(n));
6999 
7000   ins_cost(4 * INSN_COST);
7001   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7002 
7003   ins_encode(aarch64_enc_ldrw(dst, mem));
7004 
7005   ins_pipe(iload_reg_mem);
7006 %}
7007 
7008 // Load Klass Pointer
7009 instruct loadKlass(iRegPNoSp dst, memory mem)
7010 %{
7011   match(Set dst (LoadKlass mem));
7012   predicate(!needs_acquiring_load(n));
7013 
7014   ins_cost(4 * INSN_COST);
7015   format %{ "ldr  $dst, $mem\t# class" %}
7016 
7017   ins_encode(aarch64_enc_ldr(dst, mem));
7018 
7019   ins_pipe(iload_reg_mem);
7020 %}
7021 
7022 // Load Narrow Klass Pointer
7023 instruct loadNKlass(iRegNNoSp dst, memory mem)
7024 %{
7025   match(Set dst (LoadNKlass mem));
7026   predicate(!needs_acquiring_load(n));
7027 
7028   ins_cost(4 * INSN_COST);
7029   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7030 
7031   ins_encode(aarch64_enc_ldrw(dst, mem));
7032 
7033   ins_pipe(iload_reg_mem);
7034 %}
7035 
7036 // Load Float
7037 instruct loadF(vRegF dst, memory mem)
7038 %{
7039   match(Set dst (LoadF mem));
7040   predicate(!needs_acquiring_load(n));
7041 
7042   ins_cost(4 * INSN_COST);
7043   format %{ "ldrs  $dst, $mem\t# float" %}
7044 
7045   ins_encode( aarch64_enc_ldrs(dst, mem) );
7046 
7047   ins_pipe(pipe_class_memory);
7048 %}
7049 
7050 // Load Double
7051 instruct loadD(vRegD dst, memory mem)
7052 %{
7053   match(Set dst (LoadD mem));
7054   predicate(!needs_acquiring_load(n));
7055 
7056   ins_cost(4 * INSN_COST);
7057   format %{ "ldrd  $dst, $mem\t# double" %}
7058 
7059   ins_encode( aarch64_enc_ldrd(dst, mem) );
7060 
7061   ins_pipe(pipe_class_memory);
7062 %}
7063 
7064 
7065 // Load Int Constant
7066 instruct loadConI(iRegINoSp dst, immI src)
7067 %{
7068   match(Set dst src);
7069 
7070   ins_cost(INSN_COST);
7071   format %{ "mov $dst, $src\t# int" %}
7072 
7073   ins_encode( aarch64_enc_movw_imm(dst, src) );
7074 
7075   ins_pipe(ialu_imm);
7076 %}
7077 
7078 // Load Long Constant
7079 instruct loadConL(iRegLNoSp dst, immL src)
7080 %{
7081   match(Set dst src);
7082 
7083   ins_cost(INSN_COST);
7084   format %{ "mov $dst, $src\t# long" %}
7085 
7086   ins_encode( aarch64_enc_mov_imm(dst, src) );
7087 
7088   ins_pipe(ialu_imm);
7089 %}
7090 
7091 // Load Pointer Constant
7092 
7093 instruct loadConP(iRegPNoSp dst, immP con)
7094 %{
7095   match(Set dst con);
7096 
7097   ins_cost(INSN_COST * 4);
7098   format %{
7099     "mov  $dst, $con\t# ptr\n\t"
7100   %}
7101 
7102   ins_encode(aarch64_enc_mov_p(dst, con));
7103 
7104   ins_pipe(ialu_imm);
7105 %}
7106 
7107 // Load Null Pointer Constant
7108 
7109 instruct loadConP0(iRegPNoSp dst, immP0 con)
7110 %{
7111   match(Set dst con);
7112 
7113   ins_cost(INSN_COST);
7114   format %{ "mov  $dst, $con\t# NULL ptr" %}
7115 
7116   ins_encode(aarch64_enc_mov_p0(dst, con));
7117 
7118   ins_pipe(ialu_imm);
7119 %}
7120 
7121 // Load Pointer Constant One
7122 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Loads the constant pointer value one (immP_1), not NULL.
  format %{ "mov  $dst, $con\t# ptr 0x1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7134 
7135 // Load Poll Page Constant
7136 
7137 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
7138 %{
7139   match(Set dst con);
7140 
7141   ins_cost(INSN_COST);
7142   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
7143 
7144   ins_encode(aarch64_enc_mov_poll_page(dst, con));
7145 
7146   ins_pipe(ialu_imm);
7147 %}
7148 
7149 // Load Byte Map Base Constant
7150 
7151 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7152 %{
7153   match(Set dst con);
7154 
7155   ins_cost(INSN_COST);
7156   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7157 
7158   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7159 
7160   ins_pipe(ialu_imm);
7161 %}
7162 
7163 // Load Narrow Pointer Constant
7164 
7165 instruct loadConN(iRegNNoSp dst, immN con)
7166 %{
7167   match(Set dst con);
7168 
7169   ins_cost(INSN_COST * 4);
7170   format %{ "mov  $dst, $con\t# compressed ptr" %}
7171 
7172   ins_encode(aarch64_enc_mov_n(dst, con));
7173 
7174   ins_pipe(ialu_imm);
7175 %}
7176 
7177 // Load Narrow Null Pointer Constant
7178 
7179 instruct loadConN0(iRegNNoSp dst, immN0 con)
7180 %{
7181   match(Set dst con);
7182 
7183   ins_cost(INSN_COST);
7184   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7185 
7186   ins_encode(aarch64_enc_mov_n0(dst, con));
7187 
7188   ins_pipe(ialu_imm);
7189 %}
7190 
7191 // Load Narrow Klass Constant
7192 
7193 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7194 %{
7195   match(Set dst con);
7196 
7197   ins_cost(INSN_COST);
7198   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7199 
7200   ins_encode(aarch64_enc_mov_nk(dst, con));
7201 
7202   ins_pipe(ialu_imm);
7203 %}
7204 
7205 // Load Packed Float Constant
7206 
7207 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7208   match(Set dst con);
7209   ins_cost(INSN_COST * 4);
7210   format %{ "fmovs  $dst, $con"%}
7211   ins_encode %{
7212     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7213   %}
7214 
7215   ins_pipe(fp_imm_s);
7216 %}
7217 
7218 // Load Float Constant
7219 
7220 instruct loadConF(vRegF dst, immF con) %{
7221   match(Set dst con);
7222 
7223   ins_cost(INSN_COST * 4);
7224 
7225   format %{
7226     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7227   %}
7228 
7229   ins_encode %{
7230     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7231   %}
7232 
7233   ins_pipe(fp_load_constant_s);
7234 %}
7235 
7236 // Load Packed Double Constant
7237 
7238 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7239   match(Set dst con);
7240   ins_cost(INSN_COST);
7241   format %{ "fmovd  $dst, $con"%}
7242   ins_encode %{
7243     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7244   %}
7245 
7246   ins_pipe(fp_imm_d);
7247 %}
7248 
7249 // Load Double Constant
7250 
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // General double constants come from the constant table; values encodable
  // as an fmov immediate are handled by loadConD_packed instead.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7265 
7266 // Store Instructions
7267 
7268 // Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  // Selected only when the preceding StoreStore barrier can be elided;
  // otherwise the storeimmCM0_ordered rule (with dmb ishst) applies.
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7282 
7283 // Store CMS card-mark Immediate with intervening StoreStore
7284 // needed when using CMS with no conditional card marking
7285 instruct storeimmCM0_ordered(immI0 zero, memory mem)
7286 %{
7287   match(Set mem (StoreCM mem zero));
7288 
7289   ins_cost(INSN_COST * 2);
7290   format %{ "storestore\n\t"
7291             "dmb ishst"
7292             "\n\tstrb zr, $mem\t# byte" %}
7293 
7294   ins_encode(aarch64_enc_strb0_ordered(mem));
7295 
7296   ins_pipe(istore_mem);
7297 %}
7298 
7299 // Store Byte
7300 instruct storeB(iRegIorL2I src, memory mem)
7301 %{
7302   match(Set mem (StoreB mem src));
7303   predicate(!needs_releasing_store(n));
7304 
7305   ins_cost(INSN_COST);
7306   format %{ "strb  $src, $mem\t# byte" %}
7307 
7308   ins_encode(aarch64_enc_strb(src, mem));
7309 
7310   ins_pipe(istore_reg_mem);
7311 %}
7312 
7313 
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // aarch64_enc_strb0 stores the zero register, so no source register is used.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7326 
7327 // Store Char/Short
7328 instruct storeC(iRegIorL2I src, memory mem)
7329 %{
7330   match(Set mem (StoreC mem src));
7331   predicate(!needs_releasing_store(n));
7332 
7333   ins_cost(INSN_COST);
7334   format %{ "strh  $src, $mem\t# short" %}
7335 
7336   ins_encode(aarch64_enc_strh(src, mem));
7337 
7338   ins_pipe(istore_reg_mem);
7339 %}
7340 
7341 instruct storeimmC0(immI0 zero, memory mem)
7342 %{
7343   match(Set mem (StoreC mem zero));
7344   predicate(!needs_releasing_store(n));
7345 
7346   ins_cost(INSN_COST);
7347   format %{ "strh  zr, $mem\t# short" %}
7348 
7349   ins_encode(aarch64_enc_strh0(mem));
7350 
7351   ins_pipe(istore_mem);
7352 %}
7353 
7354 // Store Integer
7355 
7356 instruct storeI(iRegIorL2I src, memory mem)
7357 %{
7358   match(Set mem(StoreI mem src));
7359   predicate(!needs_releasing_store(n));
7360 
7361   ins_cost(INSN_COST);
7362   format %{ "strw  $src, $mem\t# int" %}
7363 
7364   ins_encode(aarch64_enc_strw(src, mem));
7365 
7366   ins_pipe(istore_reg_mem);
7367 %}
7368 
7369 instruct storeimmI0(immI0 zero, memory mem)
7370 %{
7371   match(Set mem(StoreI mem zero));
7372   predicate(!needs_releasing_store(n));
7373 
7374   ins_cost(INSN_COST);
7375   format %{ "strw  zr, $mem\t# int" %}
7376 
7377   ins_encode(aarch64_enc_strw0(mem));
7378 
7379   ins_pipe(istore_mem);
7380 %}
7381 
7382 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  // Plain (non-releasing) store only; volatile stores use storeL_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7395 
7396 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // aarch64_enc_str0 stores the zero register directly.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7409 
7410 // Store Pointer
7411 instruct storeP(iRegP src, memory mem)
7412 %{
7413   match(Set mem (StoreP mem src));
7414   predicate(!needs_releasing_store(n));
7415 
7416   ins_cost(INSN_COST);
7417   format %{ "str  $src, $mem\t# ptr" %}
7418 
7419   ins_encode(aarch64_enc_str(src, mem));
7420 
7421   ins_pipe(istore_reg_mem);
7422 %}
7423 
7424 // Store Pointer
7425 instruct storeimmP0(immP0 zero, memory mem)
7426 %{
7427   match(Set mem (StoreP mem zero));
7428   predicate(!needs_releasing_store(n));
7429 
7430   ins_cost(INSN_COST);
7431   format %{ "str zr, $mem\t# ptr" %}
7432 
7433   ins_encode(aarch64_enc_str0(mem));
7434 
7435   ins_pipe(istore_mem);
7436 %}
7437 
7438 // Store Compressed Pointer
7439 instruct storeN(iRegN src, memory mem)
7440 %{
7441   match(Set mem (StoreN mem src));
7442   predicate(!needs_releasing_store(n));
7443 
7444   ins_cost(INSN_COST);
7445   format %{ "strw  $src, $mem\t# compressed ptr" %}
7446 
7447   ins_encode(aarch64_enc_strw(src, mem));
7448 
7449   ins_pipe(istore_reg_mem);
7450 %}
7451 
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  // When both compression bases are NULL, rheapbase holds zero (see the
  // format tag below), so it can stand in as the zero source register.
  predicate(CompressedOops::base() == NULL &&
            CompressedKlassPointers::base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7466 
7467 // Store Float
7468 instruct storeF(vRegF src, memory mem)
7469 %{
7470   match(Set mem (StoreF mem src));
7471   predicate(!needs_releasing_store(n));
7472 
7473   ins_cost(INSN_COST);
7474   format %{ "strs  $src, $mem\t# float" %}
7475 
7476   ins_encode( aarch64_enc_strs(src, mem) );
7477 
7478   ins_pipe(pipe_class_memory);
7479 %}
7480 
7481 // TODO
7482 // implement storeImmF0 and storeFImmPacked
7483 
7484 // Store Double
7485 instruct storeD(vRegD src, memory mem)
7486 %{
7487   match(Set mem (StoreD mem src));
7488   predicate(!needs_releasing_store(n));
7489 
7490   ins_cost(INSN_COST);
7491   format %{ "strd  $src, $mem\t# double" %}
7492 
7493   ins_encode( aarch64_enc_strd(src, mem) );
7494 
7495   ins_pipe(pipe_class_memory);
7496 %}
7497 
7498 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // match before predicate, consistent with every other store rule here.
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7511 
7512 // TODO
7513 // implement storeImmD0 and storeDImmPacked
7514 
7515 // prefetch instructions
7516 // Must be safe to execute with invalid address (cannot fault).
7517 
7518 instruct prefetchalloc( memory mem ) %{
7519   match(PrefetchAllocation mem);
7520 
7521   ins_cost(INSN_COST);
7522   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7523 
7524   ins_encode( aarch64_enc_prefetchw(mem) );
7525 
7526   ins_pipe(iload_prefetch);
7527 %}
7528 
7529 //  ---------------- volatile loads and stores ----------------
7530 
7531 // Load Byte (8 bit signed)
7532 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7533 %{
7534   match(Set dst (LoadB mem));
7535 
7536   ins_cost(VOLATILE_REF_COST);
7537   format %{ "ldarsb  $dst, $mem\t# byte" %}
7538 
7539   ins_encode(aarch64_enc_ldarsb(dst, mem));
7540 
7541   ins_pipe(pipe_serial);
7542 %}
7543 
7544 // Load Byte (8 bit signed) into long
7545 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7546 %{
7547   match(Set dst (ConvI2L (LoadB mem)));
7548 
7549   ins_cost(VOLATILE_REF_COST);
7550   format %{ "ldarsb  $dst, $mem\t# byte" %}
7551 
7552   ins_encode(aarch64_enc_ldarsb(dst, mem));
7553 
7554   ins_pipe(pipe_serial);
7555 %}
7556 
7557 // Load Byte (8 bit unsigned)
7558 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7559 %{
7560   match(Set dst (LoadUB mem));
7561 
7562   ins_cost(VOLATILE_REF_COST);
7563   format %{ "ldarb  $dst, $mem\t# byte" %}
7564 
7565   ins_encode(aarch64_enc_ldarb(dst, mem));
7566 
7567   ins_pipe(pipe_serial);
7568 %}
7569 
7570 // Load Byte (8 bit unsigned) into long
7571 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7572 %{
7573   match(Set dst (ConvI2L (LoadUB mem)));
7574 
7575   ins_cost(VOLATILE_REF_COST);
7576   format %{ "ldarb  $dst, $mem\t# byte" %}
7577 
7578   ins_encode(aarch64_enc_ldarb(dst, mem));
7579 
7580   ins_pipe(pipe_serial);
7581 %}
7582 
7583 // Load Short (16 bit signed)
7584 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7585 %{
7586   match(Set dst (LoadS mem));
7587 
7588   ins_cost(VOLATILE_REF_COST);
7589   format %{ "ldarshw  $dst, $mem\t# short" %}
7590 
7591   ins_encode(aarch64_enc_ldarshw(dst, mem));
7592 
7593   ins_pipe(pipe_serial);
7594 %}
7595 
7596 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7597 %{
7598   match(Set dst (LoadUS mem));
7599 
7600   ins_cost(VOLATILE_REF_COST);
7601   format %{ "ldarhw  $dst, $mem\t# short" %}
7602 
7603   ins_encode(aarch64_enc_ldarhw(dst, mem));
7604 
7605   ins_pipe(pipe_serial);
7606 %}
7607 
7608 // Load Short/Char (16 bit unsigned) into long
7609 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7610 %{
7611   match(Set dst (ConvI2L (LoadUS mem)));
7612 
7613   ins_cost(VOLATILE_REF_COST);
7614   format %{ "ldarh  $dst, $mem\t# short" %}
7615 
7616   ins_encode(aarch64_enc_ldarh(dst, mem));
7617 
7618   ins_pipe(pipe_serial);
7619 %}
7620 
7621 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Format matches the encoding name (sign-extending acquire load), in the
  // same style as loadS_volatile's "ldarshw".
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7633 
7634 // Load Integer (32 bit signed)
7635 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7636 %{
7637   match(Set dst (LoadI mem));
7638 
7639   ins_cost(VOLATILE_REF_COST);
7640   format %{ "ldarw  $dst, $mem\t# int" %}
7641 
7642   ins_encode(aarch64_enc_ldarw(dst, mem));
7643 
7644   ins_pipe(pipe_serial);
7645 %}
7646 
7647 // Load Integer (32 bit unsigned) into long
7648 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7649 %{
7650   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7651 
7652   ins_cost(VOLATILE_REF_COST);
7653   format %{ "ldarw  $dst, $mem\t# int" %}
7654 
7655   ins_encode(aarch64_enc_ldarw(dst, mem));
7656 
7657   ins_pipe(pipe_serial);
7658 %}
7659 
7660 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7672 
7673 // Load Pointer
7674 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7675 %{
7676   match(Set dst (LoadP mem));
7677 
7678   ins_cost(VOLATILE_REF_COST);
7679   format %{ "ldar  $dst, $mem\t# ptr" %}
7680 
7681   ins_encode(aarch64_enc_ldar(dst, mem));
7682 
7683   ins_pipe(pipe_serial);
7684 %}
7685 
7686 // Load Compressed Pointer
7687 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7688 %{
7689   match(Set dst (LoadN mem));
7690 
7691   ins_cost(VOLATILE_REF_COST);
7692   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7693 
7694   ins_encode(aarch64_enc_ldarw(dst, mem));
7695 
7696   ins_pipe(pipe_serial);
7697 %}
7698 
7699 // Load Float
7700 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7701 %{
7702   match(Set dst (LoadF mem));
7703 
7704   ins_cost(VOLATILE_REF_COST);
7705   format %{ "ldars  $dst, $mem\t# float" %}
7706 
7707   ins_encode( aarch64_enc_fldars(dst, mem) );
7708 
7709   ins_pipe(pipe_serial);
7710 %}
7711 
7712 // Load Double
7713 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7714 %{
7715   match(Set dst (LoadD mem));
7716 
7717   ins_cost(VOLATILE_REF_COST);
7718   format %{ "ldard  $dst, $mem\t# double" %}
7719 
7720   ins_encode( aarch64_enc_fldard(dst, mem) );
7721 
7722   ins_pipe(pipe_serial);
7723 %}
7724 
7725 // Store Byte
7726 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7727 %{
7728   match(Set mem (StoreB mem src));
7729 
7730   ins_cost(VOLATILE_REF_COST);
7731   format %{ "stlrb  $src, $mem\t# byte" %}
7732 
7733   ins_encode(aarch64_enc_stlrb(src, mem));
7734 
7735   ins_pipe(pipe_class_memory);
7736 %}
7737 
7738 // Store Char/Short
7739 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7740 %{
7741   match(Set mem (StoreC mem src));
7742 
7743   ins_cost(VOLATILE_REF_COST);
7744   format %{ "stlrh  $src, $mem\t# short" %}
7745 
7746   ins_encode(aarch64_enc_stlrh(src, mem));
7747 
7748   ins_pipe(pipe_class_memory);
7749 %}
7750 
7751 // Store Integer
7752 
7753 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7754 %{
7755   match(Set mem(StoreI mem src));
7756 
7757   ins_cost(VOLATILE_REF_COST);
7758   format %{ "stlrw  $src, $mem\t# int" %}
7759 
7760   ins_encode(aarch64_enc_stlrw(src, mem));
7761 
7762   ins_pipe(pipe_class_memory);
7763 %}
7764 
7765 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7777 
7778 // Store Pointer
7779 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7780 %{
7781   match(Set mem (StoreP mem src));
7782 
7783   ins_cost(VOLATILE_REF_COST);
7784   format %{ "stlr  $src, $mem\t# ptr" %}
7785 
7786   ins_encode(aarch64_enc_stlr(src, mem));
7787 
7788   ins_pipe(pipe_class_memory);
7789 %}
7790 
7791 // Store Compressed Pointer
7792 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7793 %{
7794   match(Set mem (StoreN mem src));
7795 
7796   ins_cost(VOLATILE_REF_COST);
7797   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7798 
7799   ins_encode(aarch64_enc_stlrw(src, mem));
7800 
7801   ins_pipe(pipe_class_memory);
7802 %}
7803 
7804 // Store Float
7805 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7806 %{
7807   match(Set mem (StoreF mem src));
7808 
7809   ins_cost(VOLATILE_REF_COST);
7810   format %{ "stlrs  $src, $mem\t# float" %}
7811 
7812   ins_encode( aarch64_enc_fstlrs(src, mem) );
7813 
7814   ins_pipe(pipe_class_memory);
7815 %}
7816 
7817 // TODO
7818 // implement storeImmF0 and storeFImmPacked
7819 
7820 // Store Double
7821 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7822 %{
7823   match(Set mem (StoreD mem src));
7824 
7825   ins_cost(VOLATILE_REF_COST);
7826   format %{ "stlrd  $src, $mem\t# double" %}
7827 
7828   ins_encode( aarch64_enc_fstlrd(src, mem) );
7829 
7830   ins_pipe(pipe_class_memory);
7831 %}
7832 
7833 //  ---------------- end of volatile loads and stores ----------------
7834 
7835 // ============================================================================
7836 // BSWAP Instructions
7837 
7838 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7839   match(Set dst (ReverseBytesI src));
7840 
7841   ins_cost(INSN_COST);
7842   format %{ "revw  $dst, $src" %}
7843 
7844   ins_encode %{
7845     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7846   %}
7847 
7848   ins_pipe(ialu_reg);
7849 %}
7850 
7851 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7852   match(Set dst (ReverseBytesL src));
7853 
7854   ins_cost(INSN_COST);
7855   format %{ "rev  $dst, $src" %}
7856 
7857   ins_encode %{
7858     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7859   %}
7860 
7861   ins_pipe(ialu_reg);
7862 %}
7863 
7864 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7865   match(Set dst (ReverseBytesUS src));
7866 
7867   ins_cost(INSN_COST);
7868   format %{ "rev16w  $dst, $src" %}
7869 
7870   ins_encode %{
7871     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7872   %}
7873 
7874   ins_pipe(ialu_reg);
7875 %}
7876 
7877 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7878   match(Set dst (ReverseBytesS src));
7879 
7880   ins_cost(INSN_COST);
7881   format %{ "rev16w  $dst, $src\n\t"
7882             "sbfmw $dst, $dst, #0, #15" %}
7883 
7884   ins_encode %{
7885     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7886     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7887   %}
7888 
7889   ins_pipe(ialu_reg);
7890 %}
7891 
7892 // ============================================================================
7893 // Zero Count Instructions
7894 
7895 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7896   match(Set dst (CountLeadingZerosI src));
7897 
7898   ins_cost(INSN_COST);
7899   format %{ "clzw  $dst, $src" %}
7900   ins_encode %{
7901     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7902   %}
7903 
7904   ins_pipe(ialu_reg);
7905 %}
7906 
7907 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7908   match(Set dst (CountLeadingZerosL src));
7909 
7910   ins_cost(INSN_COST);
7911   format %{ "clz   $dst, $src" %}
7912   ins_encode %{
7913     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7914   %}
7915 
7916   ins_pipe(ialu_reg);
7917 %}
7918 
7919 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7920   match(Set dst (CountTrailingZerosI src));
7921 
7922   ins_cost(INSN_COST * 2);
7923   format %{ "rbitw  $dst, $src\n\t"
7924             "clzw   $dst, $dst" %}
7925   ins_encode %{
7926     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7927     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7928   %}
7929 
7930   ins_pipe(ialu_reg);
7931 %}
7932 
7933 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7934   match(Set dst (CountTrailingZerosL src));
7935 
7936   ins_cost(INSN_COST * 2);
7937   format %{ "rbit   $dst, $src\n\t"
7938             "clz    $dst, $dst" %}
7939   ins_encode %{
7940     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7941     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7942   %}
7943 
7944   ins_pipe(ialu_reg);
7945 %}
7946 
7947 //---------- Population Count Instructions -------------------------------------
7948 //
7949 
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back into $src to clear the upper 32 bits
    // before the 64-bit vector move; the int value itself is unchanged, but
    // confirm the matcher tolerates the upper-half clobber of src here.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    // cnt counts set bits per byte; addv sums the 8 byte counts into lane 0.
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7971 
7972 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
7973   predicate(UsePopCountInstruction);
7974   match(Set dst (PopCountI (LoadI mem)));
7975   effect(TEMP tmp);
7976   ins_cost(INSN_COST * 13);
7977 
7978   format %{ "ldrs   $tmp, $mem\n\t"
7979             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7980             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7981             "mov    $dst, $tmp\t# vector (1D)" %}
7982   ins_encode %{
7983     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7984     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
7985                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7986     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7987     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7988     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7989   %}
7990 
7991   ins_pipe(pipe_class_default);
7992 %}
7993 
7994 // Note: Long.bitCount(long) returns an int.
7995 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7996   predicate(UsePopCountInstruction);
7997   match(Set dst (PopCountL src));
7998   effect(TEMP tmp);
7999   ins_cost(INSN_COST * 13);
8000 
8001   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
8002             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8003             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8004             "mov    $dst, $tmp\t# vector (1D)" %}
8005   ins_encode %{
8006     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8007     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8008     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8009     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8010   %}
8011 
8012   ins_pipe(pipe_class_default);
8013 %}
8014 
8015 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
8016   predicate(UsePopCountInstruction);
8017   match(Set dst (PopCountL (LoadL mem)));
8018   effect(TEMP tmp);
8019   ins_cost(INSN_COST * 13);
8020 
8021   format %{ "ldrd   $tmp, $mem\n\t"
8022             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8023             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8024             "mov    $dst, $tmp\t# vector (1D)" %}
8025   ins_encode %{
8026     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8027     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
8028                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8029     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8030     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8031     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8032   %}
8033 
8034   ins_pipe(pipe_class_default);
8035 %}
8036 
8037 // ============================================================================
8038 // MemBar Instruction
8039 
8040 instruct load_fence() %{
8041   match(LoadFence);
8042   ins_cost(VOLATILE_REF_COST);
8043 
8044   format %{ "load_fence" %}
8045 
8046   ins_encode %{
8047     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8048   %}
8049   ins_pipe(pipe_serial);
8050 %}
8051 
8052 instruct unnecessary_membar_acquire() %{
8053   predicate(unnecessary_acquire(n));
8054   match(MemBarAcquire);
8055   ins_cost(0);
8056 
8057   format %{ "membar_acquire (elided)" %}
8058 
8059   ins_encode %{
8060     __ block_comment("membar_acquire (elided)");
8061   %}
8062 
8063   ins_pipe(pipe_class_empty);
8064 %}
8065 
8066 instruct membar_acquire() %{
8067   match(MemBarAcquire);
8068   ins_cost(VOLATILE_REF_COST);
8069 
8070   format %{ "membar_acquire\n\t"
8071             "dmb ish" %}
8072 
8073   ins_encode %{
8074     __ block_comment("membar_acquire");
8075     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8076   %}
8077 
8078   ins_pipe(pipe_serial);
8079 %}
8080 
8081 
8082 instruct membar_acquire_lock() %{
8083   match(MemBarAcquireLock);
8084   ins_cost(VOLATILE_REF_COST);
8085 
8086   format %{ "membar_acquire_lock (elided)" %}
8087 
8088   ins_encode %{
8089     __ block_comment("membar_acquire_lock (elided)");
8090   %}
8091 
8092   ins_pipe(pipe_serial);
8093 %}
8094 
8095 instruct store_fence() %{
8096   match(StoreFence);
8097   ins_cost(VOLATILE_REF_COST);
8098 
8099   format %{ "store_fence" %}
8100 
8101   ins_encode %{
8102     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8103   %}
8104   ins_pipe(pipe_serial);
8105 %}
8106 
8107 instruct unnecessary_membar_release() %{
8108   predicate(unnecessary_release(n));
8109   match(MemBarRelease);
8110   ins_cost(0);
8111 
8112   format %{ "membar_release (elided)" %}
8113 
8114   ins_encode %{
8115     __ block_comment("membar_release (elided)");
8116   %}
8117   ins_pipe(pipe_serial);
8118 %}
8119 
8120 instruct membar_release() %{
8121   match(MemBarRelease);
8122   ins_cost(VOLATILE_REF_COST);
8123 
8124   format %{ "membar_release\n\t"
8125             "dmb ish" %}
8126 
8127   ins_encode %{
8128     __ block_comment("membar_release");
8129     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8130   %}
8131   ins_pipe(pipe_serial);
8132 %}
8133 
// MemBarStoreStore: orders prior stores before subsequent stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8145 
// MemBarReleaseLock is always elided on AArch64: no instruction is emitted,
// only a block comment marking where the barrier would have been.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8158 
// Elided MemBarVolatile: the unnecessary_volatile(n) predicate has decided no
// barrier instruction is required; only a block comment is emitted.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8172 
// Full MemBarVolatile: emits a StoreLoad barrier. The inflated cost
// (VOLATILE_REF_COST*100) strongly biases the matcher toward the elided
// rule above whenever its predicate allows it.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8187 
8188 // ============================================================================
8189 // Cast/Convert Instructions
8190 
// CastX2P: reinterpret a long register value as a pointer. The mov is
// skipped entirely when the allocator assigns src and dst the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8205 
// castN2X: CastP2X where the source is a narrow (compressed) oop register;
// reinterprets it as a long. Note this matches the same CastP2X ideal node
// as castP2X below, distinguished only by the narrow-oop operand type.
// The mov is skipped when src and dst share a register.
instruct castN2X(iRegLNoSp dst, iRegN src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8220 
// CastP2X: reinterpret a pointer register value as a long. The mov is
// skipped when the allocator assigns src and dst the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8235 
// CastN2I: reinterpret a compressed-oop register value as an int (32-bit
// movw). The move is skipped when src and dst share a register.
instruct castN2I(iRegINoSp dst, iRegN src) %{
  match(Set dst (CastN2I src));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ movw(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8250 
// CastI2N: reinterpret an int register value as a compressed oop (32-bit
// movw). The move is skipped when src and dst share a register.
instruct castI2N(iRegNNoSp dst, iRegI src) %{
  match(Set dst (CastI2N src));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# int -> compressed ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ movw(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8265 
8266 
8267 // Convert oop into int for vectors alignment masking
// Convert oop into int for vectors alignment masking.
// Matches the ConvL2I(CastP2X) pattern; the 32-bit movw keeps only the low
// word of the pointer, which is all the masking needs.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8279 
8280 // Convert compressed oop into int for vectors alignment masking
8281 // in case of 32bit oops (heap < 4Gb).
// Convert compressed oop into int for vectors alignment masking, valid only
// when CompressedOops::shift() == 0 (the compressed value equals the low
// 32 bits of the decoded pointer, so DecodeN can be folded away).
//
// Fix: the format string previously read "mov dst, $src" — the missing '$'
// meant the ADLC debug printer emitted the literal text "dst" instead of the
// destination operand; also aligned the mnemonic with the movw actually
// emitted by the encoding.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8295 
8296 
8297 // Convert oop pointer into compressed form
// Convert oop pointer into compressed form (possibly-null path: the
// predicate excludes the NotNull case, which is handled by the rule below).
// encode_heap_oop may clobber flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8311 
// Compress an oop known to be non-null (no null check needed).
// NOTE(review): cr is declared as an operand but there is no effect(KILL cr)
// here, unlike encodeHeapOop above — confirm whether encode_heap_oop_not_null
// clobbers flags and either add the KILL or drop the cr operand.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8322 
// Decompress a narrow oop that may be null (the NotNull/Constant cases are
// routed to decodeHeapOop_not_null below by the predicates).
// NOTE(review): cr is declared as an operand but no effect() references it —
// confirm whether decode_heap_oop clobbers flags and adjust accordingly.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8336 
// Decompress a narrow oop known to be non-null (or a constant), skipping the
// null check the general rule above would need.
// NOTE(review): cr is declared as an operand but no effect() references it —
// confirm whether decode_heap_oop_not_null clobbers flags.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8350 
8351 // n.b. AArch64 implementations of encode_klass_not_null and
8352 // decode_klass_not_null do not modify the flags register so, unlike
8353 // Intel, we don't kill CR as a side effect here
8354 
// Compress a klass pointer. Per the note above, the AArch64 implementation
// does not modify flags, so no KILL cr effect is needed.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
8369 
// Decompress a narrow klass pointer. Per the note above, the AArch64
// implementation does not modify flags, so no KILL cr effect is needed.
// The single-register decode_klass_not_null overload is used for the
// in-place (dst == src) case.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8388 
// CheckCastPP is a type-system-only node: size(0) and an empty encoding mean
// no code is generated; the value simply stays in its register.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8398 
// CastPP is a type-system-only node: size(0) and an empty encoding mean no
// code is generated.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8408 
// CastII is a type-system-only node: size(0), ins_cost(0) and an empty
// encoding mean no code is generated.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8419 
8420 // ============================================================================
8421 // Atomic operation instructions
8422 //
8423 // Intel and SPARC both implement Ideal Node LoadPLocked and
8424 // Store{PIL}Conditional instructions using a normal load for the
8425 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8426 //
8427 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8428 // pair to lock object allocations from Eden space when not using
8429 // TLABs.
8430 //
8431 // There does not appear to be a Load{IL}Locked Ideal Node and the
8432 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8433 // and to use StoreIConditional only for 32-bit and StoreLConditional
8434 // only for 64-bit.
8435 //
8436 // We implement LoadPLocked and StorePLocked instructions using,
8437 // respectively the AArch64 hw load-exclusive and store-conditional
8438 // instructions. Whereas we must implement each of
8439 // Store{IL}Conditional using a CAS which employs a pair of
8440 // instructions comprising a load-exclusive followed by a
8441 // store-conditional.
8442 
8443 
8444 // Locked-load (linked load) of the current heap-top
8445 // used when updating the eden heap top
8446 // implemented using ldaxr on AArch64
8447 
// Locked-load (linked load) of the current heap-top, used when updating the
// eden heap top; implemented with ldaxr (load-exclusive with acquire) so the
// paired storePConditional below can detect intervening writes.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8460 
8461 // Conditional-store of the updated heap-top.
8462 // Used during allocation of the shared heap.
8463 // Sets flag (EQ) on success.
8464 // implemented using stlxr on AArch64.
8465 
// Conditional-store of the updated heap-top, paired with loadPLocked above;
// sets EQ on success via stlxr writing 0 to rscratch1 followed by a compare.
// Note oldval is carried by the match rule but not used by the encoding —
// the exclusive monitor set up by the ldaxr provides the old-value check.
// NOTE(review): the two format strings lack a "\n\t" separator, so the debug
// listing prints them run together on one line — cosmetic only.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8485 
8486 
8487 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8488 // when attempting to rebias a lock towards the current thread.  We
8489 // must use the acquire form of cmpxchg in order to guarantee acquire
8490 // semantics in this case.
// StoreLConditional, implemented as an acquiring CAS (see the comment above:
// acquire semantics are required because it is used when rebiasing a lock
// towards the current thread). Sets EQ on success.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8506 
8507 // storeIConditional also has acquire semantics, for no better reason
8508 // than matching storeLConditional.  At the time of writing this
8509 // comment storeIConditional was not used anywhere by AArch64.
// StoreIConditional: 32-bit acquiring CAS, mirroring storeLConditional (see
// the comment above — acquire is used only for consistency; this node was
// unused on AArch64 at the time of writing). Sets EQ on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8525 
8526 // standard CompareAndSwapX when we are using barriers
8527 // these have higher priority than the rules selected by a predicate
8528 
8529 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8530 // can't match them
8531 
// Standard (barrier-using) CompareAndSwap family: CAS the value at $mem and
// set $res to 1 on success, 0 on failure (cset on EQ). The *Acq variants
// below, selected by predicate, take priority when acquire semantics are
// already guaranteed. All kill flags.
// NOTE(review): the byte and short formats label the operation "(int)" —
// presumably copied from compareAndSwapI; cosmetic, confirm before changing.

// Byte CAS; result is the success flag.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Short (halfword) CAS; result is the success flag.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Int (word) CAS; result is the success flag.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Long (doubleword) CAS; result is the success flag.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS; result is the success flag.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop (word) CAS; result is the success flag.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8639 
8640 // alternative CompareAndSwapX when we are eliding barriers
8641 
// Acquiring CompareAndSwap family: selected by needs_acquiring_load_exclusive(n)
// when the CAS must itself provide acquire semantics (barriers elided). The
// lower ins_cost (VOLATILE_REF_COST vs 2x) makes these win over the plain
// rules above whenever the predicate holds. Result is 1 on success, 0 on
// failure; all kill flags.

// Byte CAS with acquire.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Short (halfword) CAS with acquire.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Int (word) CAS with acquire.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Long (doubleword) CAS with acquire.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS with acquire.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop (word) CAS with acquire.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8755 
8756 
8757 // ---------------------------------------------------------------------
8758 
8759 
8760 // BEGIN This section of the file is automatically generated. Do not edit --------------
8761 
8762 // Sundry CAS operations.  Note that release is always true,
8763 // regardless of the memory ordering of the CAS.  This is because we
8764 // need the volatile case to be sequentially consistent but there is
8765 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8766 // can't check the type of memory ordering here, so we always emit a
8767 // STLXR.
8768 
8769 // This section is generated from aarch64_ad_cas.m4
8770 
8771 
8772 
// CompareAndExchange family (auto-generated section): CAS that returns the
// value previously at $mem rather than a success flag. TEMP_DEF res keeps the
// result register distinct from the inputs; release is always true per the
// section comment above. Sub-word variants sign-extend the fetched value.
// NOTE(review): the "(byte, weak)" / "(short, weak)" / etc. format labels say
// "weak" although /*weak*/ false is passed — presumably an m4 copy-paste from
// the weakCompareAndSwap templates. Any fix belongs in aarch64_ad_cas.m4, not
// in this generated text.

// Byte compare-and-exchange; returns the sign-extended old byte.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Short compare-and-exchange; returns the sign-extended old halfword.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Int compare-and-exchange; returns the old word.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Long compare-and-exchange; returns the old doubleword.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Narrow-oop compare-and-exchange; returns the old compressed oop.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer compare-and-exchange; returns the old pointer.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8864 
// Acquiring CompareAndExchange family (auto-generated section): same as the
// rules above but selected by needs_acquiring_load_exclusive(n), passing
// /*acquire*/ true so the CAS itself supplies acquire semantics. Any textual
// fixes belong in aarch64_ad_cas.m4, not in this generated block.

// Byte compare-and-exchange with acquire; returns the sign-extended old byte.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Short compare-and-exchange with acquire; returns the sign-extended old halfword.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


// Int compare-and-exchange with acquire; returns the old word.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Long compare-and-exchange with acquire; returns the old doubleword.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


// Narrow-oop compare-and-exchange with acquire; returns the old compressed oop.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer compare-and-exchange with acquire; returns the old pointer.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8964 
8965 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8966   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
8967   ins_cost(2 * VOLATILE_REF_COST);
8968   effect(KILL cr);
8969   format %{
8970     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8971     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8972   %}
8973   ins_encode %{
8974     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8975                Assembler::byte, /*acquire*/ false, /*release*/ true,
8976                /*weak*/ true, noreg);
8977     __ csetw($res$$Register, Assembler::EQ);
8978   %}
8979   ins_pipe(pipe_slow);
8980 %}
8981 
8982 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8983   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
8984   ins_cost(2 * VOLATILE_REF_COST);
8985   effect(KILL cr);
8986   format %{
8987     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
8988     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8989   %}
8990   ins_encode %{
8991     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8992                Assembler::halfword, /*acquire*/ false, /*release*/ true,
8993                /*weak*/ true, noreg);
8994     __ csetw($res$$Register, Assembler::EQ);
8995   %}
8996   ins_pipe(pipe_slow);
8997 %}
8998 
8999 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9000   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
9001   ins_cost(2 * VOLATILE_REF_COST);
9002   effect(KILL cr);
9003   format %{
9004     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
9005     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9006   %}
9007   ins_encode %{
9008     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9009                Assembler::word, /*acquire*/ false, /*release*/ true,
9010                /*weak*/ true, noreg);
9011     __ csetw($res$$Register, Assembler::EQ);
9012   %}
9013   ins_pipe(pipe_slow);
9014 %}
9015 
9016 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
9017   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
9018   ins_cost(2 * VOLATILE_REF_COST);
9019   effect(KILL cr);
9020   format %{
9021     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
9022     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9023   %}
9024   ins_encode %{
9025     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9026                Assembler::xword, /*acquire*/ false, /*release*/ true,
9027                /*weak*/ true, noreg);
9028     __ csetw($res$$Register, Assembler::EQ);
9029   %}
9030   ins_pipe(pipe_slow);
9031 %}
9032 
// Weak CAS of a narrow oop (32-bit word) at [$mem]; $res := 1 on success,
// 0 on failure. Relaxed load / releasing store; may fail spuriously.
// Auto-generated section.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9049 
// Weak CAS of a pointer (xword) at [$mem]; $res := 1 on success, 0 on failure.
// Relaxed load / releasing store; may fail spuriously. Auto-generated section.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9066 
// Acquiring variant of the weak byte CAS (acquire=true); selected when
// needs_acquiring_load_exclusive(n) holds, and preferred over the relaxed
// rule by its lower ins_cost. Auto-generated section.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9084 
// Acquiring variant of the weak short (halfword) CAS; selected when
// needs_acquiring_load_exclusive(n) holds. Auto-generated section.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9102 
// Acquiring variant of the weak int CAS; selected when
// needs_acquiring_load_exclusive(n) holds. Auto-generated section.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9120 
// Acquiring variant of the weak long CAS; selected when
// needs_acquiring_load_exclusive(n) holds. Auto-generated section.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9138 
// Acquiring variant of the weak narrow-oop CAS; selected when
// needs_acquiring_load_exclusive(n) holds. Auto-generated section.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9156 
// Acquiring variant of the weak pointer CAS; selected when
// needs_acquiring_load_exclusive(n) holds. Auto-generated section.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9174 
9175 // END This section of the file is automatically generated. Do not edit --------------
9176 // ---------------------------------------------------------------------
9177 
// Atomic exchange of an int: writes $newv to [$mem]; $prev receives the
// value previously at [$mem] (GetAndSetI ideal semantics).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9187 
// Atomic exchange of a long: writes $newv to [$mem]; $prev receives the
// value previously at [$mem].
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9197 
// Atomic exchange of a narrow oop (32-bit word form of the exchange).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9207 
// Atomic exchange of a pointer (64-bit form of the exchange).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9217 
// Acquiring variant of the int exchange (atomic_xchgalw); selected when
// needs_acquiring_load_exclusive(n) holds, preferred via lower ins_cost.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9228 
// Acquiring variant of the long exchange (atomic_xchgal).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9239 
// Acquiring variant of the narrow-oop exchange (atomic_xchgalw).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9250 
// Acquiring variant of the pointer exchange (atomic_xchgal).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9261 
9262 
// Atomic fetch-and-add of a long: adds $incr to [$mem]. NOTE(review): the
// result operand is named 'newval' but GetAndAddL's result is the value
// before the add -- confirm against MacroAssembler::atomic_add.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9272 
// Long fetch-and-add whose result is unused (result_not_used predicate):
// noreg is passed for the destination and the ins_cost is 1 lower so this
// rule wins when it applies.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9283 
// Long fetch-and-add with an add/sub-encodable immediate increment
// (immLAddSub) instead of a register.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9293 
// Immediate-increment long fetch-and-add with the result discarded.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9304 
// Atomic fetch-and-add of an int (32-bit atomic_addw). See the note on
// get_and_addL about the 'newval' operand name.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9314 
// Int fetch-and-add with the result discarded (noreg destination).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9325 
// Int fetch-and-add with an add/sub-encodable immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9335 
// Immediate-increment int fetch-and-add with the result discarded.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9346 
// Acquiring variant of the long fetch-and-add (atomic_addal); selected when
// needs_acquiring_load_exclusive(n) holds, preferred via lower ins_cost.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9357 
// Acquiring long fetch-and-add with the result discarded.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9368 
// Acquiring long fetch-and-add with an immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9379 
// Acquiring immediate-increment long fetch-and-add, result discarded.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9390 
// Acquiring variant of the int fetch-and-add (atomic_addalw).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9401 
// Acquiring int fetch-and-add with the result discarded.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9412 
// Acquiring int fetch-and-add with an immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9423 
// Acquiring immediate-increment int fetch-and-add, result discarded.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9434 
// Manifest a CmpL result in an integer register:
//   dst = (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // cmp sets the flags; csetw yields 0 (equal) or 1 (not equal);
    // cnegw then negates the result when src1 < src2, giving -1/0/+1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9457 
// Manifest a CmpL result in an integer register, immediate-comparand form:
//   dst = (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // A negative constant is not encodable as a subs immediate, so compare
    // by adding its negation instead (immLAddSub presumably restricts the
    // constant to the add/sub immediate range, so -con cannot overflow --
    // confirm against the operand definition).
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9482 
9483 // ============================================================================
9484 // Conditional Move Instructions
9485 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9495 
// Conditional move, int, signed compare: emits
//   cselw $dst, $src2, $src1, $cmp  =>  dst = cond ? src2 : src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9511 
// Conditional move, int, unsigned compare flavour (cmpOpU / rFlagsRegU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9527 
9528 // special cases where one arg is zero
9529 
9530 // n.b. this is selected in preference to the rule above because it
9531 // avoids loading constant 0 into a source register
9532 
9533 // TODO
9534 // we ought only to be able to cull one of these variants as the ideal
9535 // transforms ought always to order the zero consistently (to left/right?)
9536 
// Int cmove with a zero first operand: uses zr instead of materialising 0,
// so this rule is cheaper than the general reg/reg one.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9552 
// Unsigned-compare flavour of the int zero/reg cmove.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9568 
// Int cmove with a zero second operand: selects zr when the condition holds.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9584 
// Unsigned-compare flavour of the int reg/zero cmove.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9600 
9601 // special case for creating a boolean 0 or 1
9602 
9603 // n.b. this is selected in preference to the rule above because it
9604 // avoids loading constants 0 and 1 into a source register
9605 
// Boolean materialisation: csincw dst, zr, zr, cond gives dst = cond ? 0 : 1
// (csinc picks zr when cond holds, zr+1 otherwise), i.e. cset with the
// negated condition, without loading constants 0 and 1 into registers.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9624 
// Unsigned-compare flavour of the boolean-materialisation cmove.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9643 
// Conditional move, long, signed compare: csel dst = cond ? src2 : src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9659 
// Conditional move, long, unsigned compare flavour.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9675 
9676 // special cases where one arg is zero
9677 
// Long cmove with a zero second operand (zr, no constant load).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9693 
// Unsigned-compare flavour of the long reg/zero cmove.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9709 
// Long cmove with a zero first operand (zr, no constant load).
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9725 
// Unsigned-compare flavour of the long zero/reg cmove.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9741 
// Conditional move, pointer, signed compare: csel dst = cond ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9757 
// Conditional move, pointer, unsigned compare flavour.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9773 
9774 // special cases where one arg is zero
9775 
// Pointer cmove with a null (zero) second operand, using zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9791 
// Unsigned-compare flavour of the pointer reg/zero cmove.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9807 
// Pointer cmove with a null (zero) first operand, using zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9823 
// Unsigned-compare flavour of the pointer zero/reg cmove.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9839 
// Conditional move, compressed ptr (narrow oop), signed compare:
// cselw dst = cond ? src2 : src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9855 
// Conditional move, compressed ptr (narrow oop), unsigned compare flavour:
// cselw dst = cond ? src2 : src1.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // Fixed: this is the cmpOpU rule, so the disassembly note must say
  // "unsigned" (it previously said "signed", copied from cmovN_reg_reg;
  // every other U-flavour rule in this family says "unsigned").
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9871 
9872 // special cases where one arg is zero
9873 
// Narrow-oop cmove with a zero second operand, using zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9889 
// Unsigned-compare flavour of the narrow-oop reg/zero cmove.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9905 
// Conditional move (signed compare), compressed oop: first arm is the
// narrow-oop null constant; zr supplies it, mirrored from cmovN_reg_zero.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9921 
// Conditional move (unsigned compare), compressed oop: first arm is the
// narrow-oop null constant; zr supplies it, mirrored from cmovUN_reg_zero.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9937 
// Conditional move between single-precision FP registers (signed compare).
// As with the integer cmov rules, src2 is passed before src1 to FCSEL.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9955 
// Conditional move between single-precision FP registers (unsigned compare).
// As with the integer cmov rules, src2 is passed before src1 to FCSEL.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9973 
// Conditional move between double-precision FP registers (signed compare).
// As with the integer cmov rules, src2 is passed before src1 to FCSEL.
// Fix: format comment said "cmove float" although this rule matches CMoveD
// on vRegD operands and emits fcseld (double).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9991 
// Conditional move between double-precision FP registers (unsigned compare).
// As with the integer cmov rules, src2 is passed before src1 to FCSEL.
// Fix: format comment said "cmove float" although this rule matches CMoveD
// on vRegD operands and emits fcseld (double).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10009 
10010 // ============================================================================
10011 // Arithmetic Instructions
10012 //
10013 
10014 // Integer Addition
10015 
10016 // TODO
10017 // these currently employ operations which do not set CR and hence are
10018 // not flagged as killing CR but we would like to isolate the cases
10019 // where we want to set flags from those where we don't. need to work
10020 // out how to do that.
10021 
// 32-bit integer add, register + register (addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10036 
// 32-bit integer add, register + add/sub-encodable immediate.
// Shares the addsubw encoder with subI_reg_imm; opcode selects add vs sub.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10050 
// 32-bit integer add of an immediate to the low word of a long source
// (AddI (ConvL2I src1) src2): the narrowing conversion is free on AArch64
// because addw only reads the low 32 bits.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10064 
10065 // Pointer Addition
// Pointer add: pointer register + 64-bit offset register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10080 
// Pointer add with an int offset: folds the ConvI2L into the add's
// sxtw extend, saving a separate sign-extension instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10095 
// Pointer add of a shifted long index: folds the LShiftL into the
// address-generation (lea with an lsl-scaled register offset).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10110 
// Pointer add of a scaled int index: folds both the ConvI2L and the
// LShiftL into a single lea with an sxtw-extended, scaled register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10125 
// Left-shift of a sign-extended int (LShiftL (ConvI2L src) scale),
// implemented as a single sbfiz: insert src at bit position (scale & 63),
// taking at most 32 source bits ((-scale) & 63 capped by MIN(32, ...)).
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10140 
10141 // Pointer Immediate Addition
10142 // n.b. this needs to be more expensive than using an indirect memory
10143 // operand
// Pointer add of an add/sub-encodable immediate. Costed at full INSN_COST
// (see note above) so folding into a memory operand is preferred.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10157 
10158 // Long Addition
// 64-bit long add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10174 
// Long Immediate Addition.
// No constant pool entries required.
// 64-bit long add, register + add/sub-encodable immediate.
// Shares the addsub encoder with subL_reg_imm; opcode selects add vs sub.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10189 
10190 // Integer Subtraction
// 32-bit integer subtract, register - register (subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10205 
10206 // Immediate Subtraction
// 32-bit integer subtract, register - add/sub-encodable immediate.
// Shares the addsubw encoder with addI_reg_imm; opcode selects add vs sub.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10220 
10221 // Long Subtraction
// 64-bit long subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10237 
// Long Immediate Subtraction.
// No constant pool entries required.
// 64-bit long subtract, register - add/sub-encodable immediate.
// Shares the addsub encoder with addL_reg_imm; opcode selects add vs sub.
// Fix: format string was "sub$dst" (missing the space/padding present in
// every sibling rule, e.g. subL_reg_reg's "sub  $dst"), which garbled
// -XX:+PrintOptoAssembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10252 
10253 // Integer Negation (special case for sub)
10254 
// 32-bit integer negate: matches (SubI 0 src) and emits negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10268 
10269 // Long Negation
10270 
// 64-bit long negate: matches (SubL 0 src) and emits neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10284 
10285 // Integer Multiply
10286 
// 32-bit integer multiply (mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10301 
// Widening 32x32->64 signed multiply: matches a long multiply of two
// sign-extended ints and emits a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10316 
10317 // Long Multiply
10318 
// 64-bit long multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10333 
// High 64 bits of a signed 64x64 multiply (MulHiL), via smulh.
// Fix: format string had a stray trailing comma before the tab
// ("$src2, \t#"), which garbled -XX:+PrintOptoAssembly output.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10349 
10350 // Combined Integer Multiply & Add/Sub
10351 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2, via maddw.
// Fix: format printed "madd" although the encoder emits the 32-bit maddw
// (siblings consistently print the w-suffixed mnemonic, e.g. mulI's "mulw").
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10367 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2, via msubw.
// Fix: format printed "msub" although the encoder emits the 32-bit msubw
// (siblings consistently print the w-suffixed mnemonic).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10383 
10384 // Combined Integer Multiply & Neg
10385 
// Fused 32-bit multiply-negate: matches a multiply with either operand
// negated and emits mnegw (dst = -(src1 * src2)).
// Fix: format printed "mneg" although the encoder emits the 32-bit mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10401 
10402 // Combined Long Multiply & Add/Sub
10403 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2, via madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10419 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2, via msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10435 
10436 // Combined Long Multiply & Neg
10437 
// Fused 64-bit multiply-negate: matches a multiply with either operand
// negated and emits mneg (dst = -(src1 * src2)).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10453 
10454 // Integer Divide
10455 
// 32-bit signed integer divide, via the shared divw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10465 
// Sign-bit extraction: (x >> 31) >>> 31 reduces to a single lsrw by 31
// (yields 0 or 1 from the sign bit).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10475 
// Divide-by-two rounding idiom: src + ((src >> 31) >>> 31) folds into a
// single addw with an LSR #31 shifted operand (adds 1 only when src < 0).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10489 
10490 // Long Divide
10491 
// 64-bit signed long divide, via the shared div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10501 
// Sign-bit extraction (long): (x >> 63) >>> 63 reduces to a single lsr
// by 63 (yields 0 or 1 from the sign bit).
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10511 
// Divide-by-two rounding idiom (long): src + ((src >> 63) >>> 63) folds
// into a single add with an LSR #63 shifted operand (adds 1 when src < 0).
// Fix: format omitted the "LSR" modifier that the encoding applies and
// that the 32-bit sibling div2Round's format shows.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10525 
10526 // Integer Remainder
10527 
// 32-bit signed remainder: div + multiply-subtract pair via the shared
// modw encoder (dst = src1 - (src1 / src2) * src2).
// Fix: format's second line read "msubw($dst, ..." with an unbalanced
// parenthesis; corrected to plain assembly syntax.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10538 
10539 // Long Remainder
10540 
// 64-bit signed remainder: div + multiply-subtract pair via the shared
// mod encoder (dst = src1 - (src1 / src2) * src2).
// Fix: format's second line read "msub($dst, ..." with an unbalanced
// parenthesis (and the first line lacked the "\t" its 32-bit sibling has);
// corrected to plain assembly syntax.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10551 
10552 // Integer Shifts
10553 
10554 // Shift Left Register
// 32-bit shift left by a register amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10569 
10570 // Shift Left Immediate
// 32-bit shift left by an immediate, masked to 0..31 per Java semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10585 
10586 // Shift Right Logical Register
// 32-bit logical shift right by a register amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10601 
10602 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate, masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10617 
10618 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register amount (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10633 
10634 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate, masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10649 
10650 // Combined Int Mask and Right Shift (using UBFM)
10651 // TODO
10652 
10653 // Long Shifts
10654 
10655 // Shift Left Register
// 64-bit shift left by a register amount (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10670 
10671 // Shift Left Immediate
// 64-bit shift left by an immediate, masked to 0..63 per Java semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10686 
10687 // Shift Right Logical Register
// 64-bit logical shift right by a register amount (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10702 
10703 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate, masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10718 
10719 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as a long (CastP2X):
// noted above as a special case for card-table store address computation.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10734 
10735 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register amount (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10750 
10751 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate, masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10766 
10767 // BEGIN This section of the file is automatically generated. Do not edit --------------
10768 
// [Generated section] 64-bit bitwise NOT: XorL with -1 becomes eon with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// [Generated section] 32-bit bitwise NOT: XorI with -1 becomes eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10801 
// [Generated section] src1 & ~src2 (32-bit): folds the XorI-with-(-1) NOT
// into a single bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10818 
// [Generated section] src1 & ~src2 (64-bit): folds the XorL-with-(-1) NOT
// into a single bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10835 
// [Generated section] src1 | ~src2 (32-bit): folds the NOT into ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10852 
// [Generated section] src1 | ~src2 (64-bit): folds the NOT into orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10869 
// [Generated section] ~(src1 ^ src2) (32-bit): matches -1 ^ (src2 ^ src1)
// and emits a single eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10886 
// [Generated section] ~(src1 ^ src2) (64-bit): matches -1 ^ (src2 ^ src1)
// and emits a single eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10903 
// [Generated section] src1 & ~(src2 >>> src3) (32-bit): folds both the NOT
// and the logical right shift into bicw with an LSR shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10921 
// [Generated section] src1 & ~(src2 >>> src3) (64-bit): folds both the NOT
// and the logical right shift into bic with an LSR shifted operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10939 
10940 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10941                          iRegIorL2I src1, iRegIorL2I src2,
10942                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10943   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10944   ins_cost(1.9 * INSN_COST);
10945   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10946 
10947   ins_encode %{
10948     __ bicw(as_Register($dst$$reg),
10949               as_Register($src1$$reg),
10950               as_Register($src2$$reg),
10951               Assembler::ASR,
10952               $src3$$constant & 0x1f);
10953   %}
10954 
10955   ins_pipe(ialu_reg_reg_shift);
10956 %}
10957 
10958 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10959                          iRegL src1, iRegL src2,
10960                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10961   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10962   ins_cost(1.9 * INSN_COST);
10963   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10964 
10965   ins_encode %{
10966     __ bic(as_Register($dst$$reg),
10967               as_Register($src1$$reg),
10968               as_Register($src2$$reg),
10969               Assembler::ASR,
10970               $src3$$constant & 0x3f);
10971   %}
10972 
10973   ins_pipe(ialu_reg_reg_shift);
10974 %}
10975 
10976 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10977                          iRegIorL2I src1, iRegIorL2I src2,
10978                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10979   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10980   ins_cost(1.9 * INSN_COST);
10981   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10982 
10983   ins_encode %{
10984     __ bicw(as_Register($dst$$reg),
10985               as_Register($src1$$reg),
10986               as_Register($src2$$reg),
10987               Assembler::LSL,
10988               $src3$$constant & 0x1f);
10989   %}
10990 
10991   ins_pipe(ialu_reg_reg_shift);
10992 %}
10993 
10994 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10995                          iRegL src1, iRegL src2,
10996                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10997   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10998   ins_cost(1.9 * INSN_COST);
10999   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11000 
11001   ins_encode %{
11002     __ bic(as_Register($dst$$reg),
11003               as_Register($src1$$reg),
11004               as_Register($src2$$reg),
11005               Assembler::LSL,
11006               $src3$$constant & 0x3f);
11007   %}
11008 
11009   ins_pipe(ialu_reg_reg_shift);
11010 %}
11011 
// dst = ~(src1 ^ (src2 shifted)) --- EON with a shifted register operand.
// The outer (XorX -1 ...) supplies the NOT; EON computes src1 ^ ~(shifted
// src2), which equals ~(src1 ^ shifted src2).  Shift amounts are masked to
// 5 bits (& 0x1f) for 32-bit forms and 6 bits (& 0x3f) for 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 >>> src3)).
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = ~(src1 ^ (src2 >> src3)) (arithmetic shift).
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 >> src3)) (arithmetic shift).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = ~(src1 ^ (src2 << src3)).
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 << src3)).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11119 
// dst = src1 | ~(src2 shifted) --- ORN (OR-NOT) with a shifted register
// operand.  NOT is matched as (XOR x -1) via src4.  Shift amounts are
// masked to 5 bits (& 0x1f) for 32-bit forms and 6 bits (& 0x3f) for
// 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 >>> src3).
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | ~(src2 >> src3) (arithmetic shift).
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 >> src3) (arithmetic shift).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | ~(src2 << src3).
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 << src3).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11227 
// dst = src1 & (src2 shifted) --- AND with a shifted register operand,
// folding the shift into the logical op (one instruction instead of two).
// Assembler method is `andr` for the 64-bit form (`and` being reserved).
// Shift amounts are masked to 5 bits (& 0x1f) for 32-bit forms and 6 bits
// (& 0x3f) for 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 >>> src3).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 & (src2 >> src3) (arithmetic shift).
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 >> src3) (arithmetic shift).
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 & (src2 << src3).
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 << src3).
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11341 
// dst = src1 ^ (src2 shifted) --- EOR with a shifted register operand,
// folding the shift into the exclusive-or.  Shift amounts are masked to
// 5 bits (& 0x1f) for 32-bit forms and 6 bits (& 0x3f) for 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 >>> src3).
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 ^ (src2 >> src3) (arithmetic shift).
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 >> src3) (arithmetic shift).
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 ^ (src2 << src3).
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 << src3).
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11455 
// dst = src1 | (src2 shifted) --- ORR with a shifted register operand,
// folding the shift into the inclusive-or.  Shift amounts are masked to
// 5 bits (& 0x1f) for 32-bit forms and 6 bits (& 0x3f) for 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >>> src3).
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | (src2 >> src3) (arithmetic shift).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >> src3) (arithmetic shift).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | (src2 << src3).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 << src3).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11569 
// dst = src1 + (src2 shifted) --- ADD with a shifted register operand,
// folding the shift into the addition.  Shift amounts are masked to
// 5 bits (& 0x1f) for 32-bit forms and 6 bits (& 0x3f) for 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 >>> src3).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 + (src2 >> src3) (arithmetic shift).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 >> src3) (arithmetic shift).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 + (src2 << src3).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 << src3).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11683 
// dst = src1 - (src2 shifted) --- SUB with a shifted register operand,
// folding the shift into the subtraction.  Shift amounts are masked to
// 5 bits (& 0x1f) for 32-bit forms and 6 bits (& 0x3f) for 64-bit forms.
// NOTE(review): operand `cr` is declared but never used or killed by these
// encodings -- confirm whether it is actually needed.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 >>> src3).
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 - (src2 >> src3) (arithmetic shift).
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 >> src3) (arithmetic shift).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 - (src2 << src3).
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 << src3).
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11797 
11798 
11799 
// Shift Left followed by signed Shift Right: (src << lshift) >> rshift.
// This idiom is used by the compiler for narrowing conversions such as the
// i2b bytecode etc.  It maps onto a single SBFM (signed bit-field move):
//   immr (r) = (rshift - lshift) & 63  -- right-rotate that places the field
//   imms (s) = 63 - lshift             -- top bit of the source field
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts use only their low 6 bits for 64-bit shifts.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11819 
// 32-bit variant: (src << lshift) >> rshift (signed), as a single SBFMW.
// This idiom is used by the compiler for narrowing conversions such as the
// i2b bytecode etc.
//   immr (r) = (rshift - lshift) & 31  -- right-rotate that places the field
//   imms (s) = 31 - lshift             -- top bit of the source field
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts use only their low 5 bits for 32-bit shifts.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11839 
// Shift Left followed by unsigned Shift Right: (src << lshift) >>> rshift.
// Unsigned counterpart of sbfmL; maps onto a single UBFM (unsigned
// bit-field move) with the same immr/imms derivation:
//   immr (r) = (rshift - lshift) & 63
//   imms (s) = 63 - lshift
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts use only their low 6 bits for 64-bit shifts.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11859 
// Shift Left followed by logical Shift Right.
// 32-bit variant of ubfmL above: the pair becomes a single UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to the 32-bit register width.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    // Same immr/imms derivation as sbfmwI.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, with mask+1 a power of two, is a UBFXW
// extracting the field of exact_log2(mask+1) bits at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // The extracted field (bits rshift .. rshift+width-1) must lie
  // entirely within the 32-bit register.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // immI_bitmask guarantees mask+1 is a power of two, so width is exact.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of ubfxwI: (src >>> rshift) & mask becomes a UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  // The extracted field must lie entirely within the 64-bit register.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    // immL_bitmask guarantees mask+1 is a power of two, so width is exact.
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11915 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field into the long
// result, so the ConvI2L costs no extra instruction.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // immI_bitmask guarantees mask+1 is a power of two, so width is exact.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11935 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// UBFIZW places the low `width` bits of src at bit position `lshift`,
// zeroing all other bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  // The shifted field must still fit in the 32-bit register.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    // immI_bitmask guarantees mask+1 is a power of two, so width is exact.
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI above.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  // The shifted field must still fit in the 64-bit register.
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    // immL_bitmask guarantees mask+1 is a power of two, so width is exact.
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11972 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz.
// The 64-bit ubfiz zeroes every bit outside the inserted field, so the
// ConvI2L zero/sign-extension is subsumed.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  // The shifted field must still fit in the 64-bit register.
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    // immI_bitmask guarantees mask+1 is a power of two, so width is exact.
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11990 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with complementary shift counts
// (lshift + rshift == 0 mod 64) is an EXTR from the register pair;
// with src1 == src2 this is a rotate right.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must be complementary modulo the register width.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12007 
// 32-bit variant of extrOrL: complementary shifts (mod 32) combine
// into a single EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must be complementary modulo the register width.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Format now says "extrw" to match the emitted 32-bit instruction
  // (was "extr", which is the 64-bit rule's mnemonic in this file).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12022 
// AddL form of extrOrL: with complementary shift counts the two
// operands occupy disjoint bit ranges, so add and or produce the same
// result and the pair still maps onto one EXTR.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must be complementary modulo the register width.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12037 
// 32-bit AddI form of the EXTR rotation idiom: complementary shifts
// give disjoint operands, so add equals or and one EXTRW suffices.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must be complementary modulo the register width.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Format now says "extrw" to match the emitted 32-bit instruction
  // (was "extr", which is the 64-bit rule's mnemonic in this file).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12052 
12053 
// rol expander

// Rotate-left is synthesized as a variable rotate right by the negated
// count: (-shift) and (64 - shift) agree in the low six bits, which is
// all RORV consumes.  Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12069 
// rol expander

// 32-bit variant of rolL_rReg: the negated count feeds RORVW.
// Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12085 
// Match rotate-left idioms written with an explicit complementary
// shift count — either (x << s) | (x >>> (64 - s)) or, relying on the
// count being taken mod the width, (x << s) | (x >>> (0 - s)) — and
// expand them to the rol expanders above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12121 
// ror expander

// Variable rotate right maps directly onto a single RORV.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12136 
// ror expander

// 32-bit variant of rorL_rReg: a single RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12151 
// Match rotate-right idioms written with an explicit complementary
// shift count, mirroring the rol variants above, and expand them to
// the ror expanders.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12187 
// Add/subtract (extended)

// Add of an int widened to long: the ConvI2L folds into the add's
// sxtw extended-register form, so no separate extension is emitted.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract counterpart of AddExtI above.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12215 
12216 
// Add where the second input is a (x << 16) >> 16 (sign-extended
// short) or (x << 24) >> 24 (sign-extended byte) shift pair: fold the
// extension into the add's sxth/sxtb/uxtb extended-register form.
// The immI_16/immI_24 operands pin the shift counts in the match.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Logical right shift variant: zero-extended byte, hence uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12255 
// 64-bit versions of the shift-pair extension folds above: the
// immI_48/immI_32/immI_56 operands select half-word, word and byte
// fields respectively.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Logical right shift variant: zero-extended byte, hence uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12307 
12308 
// Add where the second input is masked with 0xff / 0xffff /
// 0xffffffff: fold the And into the add's uxtb/uxth/uxtw
// extended-register form.  The immI_255 / immI_65535 (and immL_*)
// operands pin the mask value in the match.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12373 
// Subtract counterparts of the masked-add folds above: the And with
// 0xff / 0xffff / 0xffffffff becomes the sub's uxtb/uxth/uxtw
// extended-register form.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12438 
12439 
// Extended-register add/sub where the sign-extended value is further
// shifted left: the lshift2 amount (immIExt, limited to the range the
// extended-register encoding accepts) goes into the extend encoding.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12517 
// 32-bit versions of the shifted extended-register add/sub folds
// above (byte via immI_24, half-word via immI_16 shift pairs).
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12569 
12570 
// Add/sub of a widened int that is then left-shifted: the ConvI2L and
// the shift both fold into the sxtw extended-register encoding.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// Subtract counterpart of AddExtI_shift above.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12596 
12597 
// Add/sub of a masked-then-shifted long: both the And with
// 0xff/0xffff/0xffffffff and the left shift fold into the
// uxtb/uxth/uxtw extended-register encoding with a shift amount.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12675 
// dst = src1 + ((src2 & 0xff) << lshift), fused into ADDW (extended register, uxtb).
// NOTE(review): auto-generated section — cr is declared but unused; do not hand-edit.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12688 
// dst = src1 + ((src2 & 0xffff) << lshift), fused into ADDW (extended register, uxth).
// NOTE(review): auto-generated section — cr is declared but unused; do not hand-edit.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12701 
// dst = src1 - ((src2 & 0xff) << lshift), fused into SUBW (extended register, uxtb).
// NOTE(review): auto-generated section — cr is declared but unused; do not hand-edit.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12714 
// dst = src1 - ((src2 & 0xffff) << lshift), fused into SUBW (extended register, uxth).
// NOTE(review): auto-generated section — cr is declared but unused; do not hand-edit.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12727 // END This section of the file is automatically generated. Do not edit --------------
12728 
12729 // ============================================================================
12730 // Floating Point Arithmetic Instructions
12731 
// Single-precision add: dst = src1 + src2 (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12746 
// Double-precision add: dst = src1 + src2 (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12761 
// Single-precision subtract: dst = src1 - src2 (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12776 
// Double-precision subtract: dst = src1 - src2 (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12791 
// Single-precision multiply: dst = src1 * src2 (FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12806 
// Double-precision multiply: dst = src1 * src2 (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12821 
12822 // src1 * src2 + src3
// Fused multiply-add, single: dst = src1 * src2 + src3 (FMADDS). Only when UseFMA.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12838 
12839 // src1 * src2 + src3
// Fused multiply-add, double: dst = src1 * src2 + src3 (FMADDD). Only when UseFMA.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12855 
12856 // -src1 * src2 + src3
// Fused multiply-subtract, single: dst = -(src1 * src2) + src3 (FMSUBS).
// Two match rules cover the negation on either multiplicand. Only when UseFMA.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12873 
12874 // -src1 * src2 + src3
// Fused multiply-subtract, double: dst = -(src1 * src2) + src3 (FMSUBD).
// Two match rules cover the negation on either multiplicand. Only when UseFMA.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12891 
12892 // -src1 * src2 - src3
// Fused negated multiply-add, single: dst = -(src1 * src2) - src3 (FNMADDS).
// Two match rules cover the negation on either multiplicand. Only when UseFMA.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12909 
12910 // -src1 * src2 - src3
// Fused negated multiply-add, double: dst = -(src1 * src2) - src3 (FNMADDD).
// Two match rules cover the negation on either multiplicand. Only when UseFMA.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12927 
12928 // src1 * src2 - src3
// Fused negated multiply-subtract, single: dst = src1 * src2 - src3 (FNMSUBS).
// Only when UseFMA.
// NOTE(review): the 'zero' operand is not referenced by the match rule or the
// encode block — looks like a leftover from an earlier (SubF zero ...) rule;
// confirm whether it can be dropped.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12944 
12945 // src1 * src2 - src3
// Fused negated multiply-subtract, double: dst = src1 * src2 - src3 (FNMSUB, double).
// Only when UseFMA.
// NOTE(review): the 'zero' operand is not referenced by the match rule or the
// encode block — looks like a leftover from an earlier (SubD zero ...) rule;
// confirm whether it can be dropped.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12962 
12963 
12964 // Math.max(FF)F
// IEEE-754 max, single: dst = max(src1, src2) (FMAXS), backing Math.max(F,F).
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12977 
12978 // Math.min(FF)F
// IEEE-754 min, single: dst = min(src1, src2) (FMINS), backing Math.min(F,F).
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12991 
12992 // Math.max(DD)D
// IEEE-754 max, double: dst = max(src1, src2) (FMAXD), backing Math.max(D,D).
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13005 
13006 // Math.min(DD)D
// IEEE-754 min, double: dst = min(src1, src2) (FMIND), backing Math.min(D,D).
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13019 
13020 
// Single-precision divide: dst = src1 / src2 (FDIVS). High cost reflects divider latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13035 
// Double-precision divide: dst = src1 / src2 (FDIVD). High cost reflects divider latency.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13050 
// Single-precision negate: dst = -src (FNEGS).
// Fix: format said "fneg" while the encoding emits fnegs; aligned the
// mnemonic with the emitted instruction (matches negD's "fnegd" style).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13064 
// Double-precision negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13078 
// Single-precision absolute value: dst = |src| (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13091 
// Double-precision absolute value: dst = |src| (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13104 
// Double-precision square root: dst = sqrt(src) (FSQRTD).
// Fix: pipeline class was fp_div_s (single-precision divide/sqrt unit);
// a double-precision op should schedule on fp_div_d — it was swapped with
// sqrtF_reg's class. Scheduling-only change; emitted code is unaffected.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13117 
// Single-precision square root, matched from the canonical f2d/sqrtd/d2f
// idiom C2 produces for (float)Math.sqrt((double)f): emits a single FSQRTS.
// Fix: pipeline class was fp_div_d (double-precision divide/sqrt unit);
// this single-precision op belongs on fp_div_s — it was swapped with
// sqrtD_reg's class. Scheduling-only change; emitted code is unaffected.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13130 
13131 // ============================================================================
13132 // Logical Instructions
13133 
13134 // Integer Logical Instructions
13135 
13136 // And Instructions
13137 
13138 
// Int bitwise and: dst = src1 & src2 (ANDW).
// NOTE(review): cr is declared but appears in neither match nor an effect —
// presumably a leftover; andw does not set flags.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13153 
// Int bitwise and with logical immediate: dst = src1 & src2 (ANDW, immediate form).
// Fix: format said "andsw" (the flag-setting variant) but the encoding emits
// plain andw, which does not set flags; the mnemonic now matches the emission.
// NOTE(review): cr is declared but unused, as in andI_reg_reg — kept to
// preserve the instruct signature.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13168 
13169 // Or Instructions
13170 
// Int bitwise or: dst = src1 | src2 (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13185 
// Int bitwise or with logical immediate: dst = src1 | src2 (ORRW, immediate form).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13200 
13201 // Xor Instructions
13202 
// Int bitwise exclusive-or: dst = src1 ^ src2 (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13217 
// Int bitwise exclusive-or with logical immediate: dst = src1 ^ src2 (EORW, immediate form).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13232 
13233 // Long Logical Instructions
13234 // TODO
13235 
// Long bitwise and: dst = src1 & src2 (AND, 64-bit).
// Fix: format comment said "# int" (copy-paste from the int variant); it is
// a long operation — corrected to "# long".
// NOTE(review): cr is declared but unused — kept to preserve the signature.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13250 
// Long bitwise and with logical immediate: dst = src1 & src2 (AND, 64-bit immediate form).
// Fix: format comment said "# int"; corrected to "# long".
// NOTE(review): cr is declared but unused — kept to preserve the signature.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13265 
13266 // Or Instructions
13267 
// Long bitwise or: dst = src1 | src2 (ORR, 64-bit).
// Fix: format comment said "# int"; corrected to "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13282 
// Long bitwise or with logical immediate: dst = src1 | src2 (ORR, 64-bit immediate form).
// Fix: format comment said "# int"; corrected to "# long".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13297 
13298 // Xor Instructions
13299 
// Long bitwise exclusive-or: dst = src1 ^ src2 (EOR, 64-bit).
// Fix: format comment said "# int"; corrected to "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13314 
// Long bitwise exclusive-or with logical immediate: dst = src1 ^ src2
// (EOR, 64-bit immediate form).
// Fix: format comment said "# int"; corrected to "# long".
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13329 
// Sign-extend int to long via SBFM with imms covering bits 0..31 (i.e. sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13341 
13342 // this pattern occurs in bigmath arithmetic
// Zero-extend int to long: matches (ConvI2L src) & 0xffffffff and emits a
// single UBFM (uxtw) instead of sign-extend + mask.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13355 
// Truncate long to int: a 32-bit register move discards the upper 32 bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13368 
// Int to boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero + CSET.
// Clobbers flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13386 
// Pointer to boolean: dst = (src != NULL) ? 1 : 0, via 64-bit compare + CSET.
// Clobbers flags, hence KILL cr.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13404 
// Narrow double to float (FCVT, double source).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
13417 
// Widen float to double (FCVT, single source).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
13430 
// Float to int: signed convert, round toward zero (FCVTZS, 32-bit dest).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
13443 
// Float to long: signed convert, round toward zero (FCVTZS, 64-bit dest).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
13456 
// Int to float: signed scalar convert (SCVTF, 32-bit source, single dest).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
13469 
// Long to float: signed scalar convert (SCVTF, 64-bit source, single dest).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
13482 
// Double to int: signed convert, round toward zero (FCVTZS, 32-bit dest).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
13495 
// Double to long: signed convert, round toward zero (FCVTZS, 64-bit dest).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
13508 
// Int to double: signed scalar convert (SCVTF, 32-bit source, double dest).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
13521 
// Long to double: signed scalar convert (SCVTF, 64-bit source, double dest).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13534 
13535 // stack <-> reg and reg <-> reg shuffles with no conversion
13536 
// Bit-pattern move float->int where the float value lives in a stack slot:
// plain 32-bit integer load, no conversion.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13554 
// Bit-pattern move int->float where the int value lives in a stack slot:
// 32-bit FP load (LDRS), no conversion.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13572 
// Bit-pattern move double->long where the double value lives in a stack slot:
// plain 64-bit integer load, no conversion.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13590 
// Bit-pattern move long->double where the long value lives in a stack slot:
// 64-bit FP load (LDRD), no conversion.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13608 
// Bit-pattern move float->int with an integer stack slot destination:
// 32-bit FP store (STRS), no conversion.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13626 
// Bit-pattern move int->float with a float stack slot destination:
// plain 32-bit integer store, no conversion.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13644 
// Bit-pattern move double->long with a long stack slot destination:
// 64-bit FP store (STRD), no conversion.
// Fix: format listed the operands as "$dst, $src" — stores elsewhere in this
// file (strs/strw/str) print "$src, $dst", matching the encode which stores
// $src to the $dst stack slot; reordered for a truthful disassembly comment.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13662 
// Bit-pattern move long->double with a double stack slot destination:
// plain 64-bit integer store, no conversion.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13680 
// Bit-pattern move float->int, register to register (FMOV S->W), no memory round-trip.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
13698 
// Bit-pattern move int->float, register to register (FMOV W->S), no memory round-trip.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
13716 
// Bit-pattern move double->long, register to register (FMOV D->X), no memory round-trip.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
13734 
// Bit-pattern move long->double, register to register (FMOV X->D), no memory round-trip.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13752 
13753 // ============================================================================
13754 // clearing of an array
13755 
// Fill cnt words starting at base with val (array clearing). cnt and base are
// pinned to r11/r10 and clobbered by the fill loop, hence USE_KILL on both.
// NOTE(review): cr appears in the operand list but in neither match nor
// effect — presumably fill_words clobbers flags; confirm and add KILL cr
// if so.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, iRegL val, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray (Binary cnt base) val));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base, $val" %}

  ins_encode %{
    __ fill_words($base$$Register, $cnt$$Register, $val$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
13770 
13771 // ============================================================================
13772 // Overflow Math Instructions
13773 
// Int add overflow check: CMNW (adds and discards result) sets flags for the
// subsequent branch on the overflow condition.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
13786 
13787 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13788 %{
13789   match(Set cr (OverflowAddI op1 op2));
13790 
13791   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13792   ins_cost(INSN_COST);
13793   ins_encode %{
13794     __ cmnw($op1$$Register, $op2$$constant);
13795   %}
13796 
13797   ins_pipe(icmp_reg_imm);
13798 %}
13799 
13800 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13801 %{
13802   match(Set cr (OverflowAddL op1 op2));
13803 
13804   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13805   ins_cost(INSN_COST);
13806   ins_encode %{
13807     __ cmn($op1$$Register, $op2$$Register);
13808   %}
13809 
13810   ins_pipe(icmp_reg_reg);
13811 %}
13812 
13813 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13814 %{
13815   match(Set cr (OverflowAddL op1 op2));
13816 
13817   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13818   ins_cost(INSN_COST);
13819   ins_encode %{
13820     __ cmn($op1$$Register, $op2$$constant);
13821   %}
13822 
13823   ins_pipe(icmp_reg_imm);
13824 %}
13825 
// Int subtract overflow check: cmpw sets the flags for op1 - op2 and
// discards the result; the consumer tests the V flag (VS/VC).
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate second operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check: 64-bit cmp, flags only.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long variant with add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // subs with a zr destination is the cmp alias: flags only.
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13877 
// Negation overflow check: OverflowSub with a zero first input.
// cmpw zr, op1 sets flags for 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negation overflow check: cmp zr, op1 sets flags for 0 - op1.
// NOTE(review): the zero operand is declared immI0 although the match
// rule is OverflowSubL -- confirm an int-typed zero is what the ideal
// graph supplies here.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13903 
// Int multiply overflow check.  smull forms the full 64-bit product in
// rscratch1; comparing the product with its own low word sign-extended
// (sxtw) is EQ exactly when the result fits in 32 bits.  The trailing
// movw/cselw/cmpw sequence translates NE (overflow) into the V flag so
// a generic overflow/no_overflow test (VS/VC) works on cr.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// When the overflow check feeds a branch directly (If with an
// overflow/no_overflow test) we can skip the V-flag materialization and
// branch on the NE/EQ outcome of the sxtw compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) branches on NE; VC branches on EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13946 
// Long multiply overflow check.  mul gives product bits 0..63, smulh
// bits 64..127; the product fits in 64 bits exactly when the high half
// equals the sign extension of the low half (low >> 63, arithmetic).
// The trailing movw/cselw/cmpw sequence translates NE (overflow) into
// the V flag, as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Branch form: as above but branches directly on the NE/EQ outcome of
// the high-half compare instead of materializing the V flag.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // VS (overflow requested) branches on NE; VC branches on EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13993 
13994 // ============================================================================
13995 // Compare Instructions
13996 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costed at two
// instructions since the constant may need materializing first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14052 
14053 // Unsigned compare Instructions; really, same as signed compare
14054 // except it should only be used to feed an If or a CMovI which takes a
14055 // cmpOpU.
14056 
// Unsigned int compare, register-register.  Same encodings as the
// signed forms; only the flags register class (rFlagsRegU) differs so
// consumers use unsigned condition codes (cmpOpU).
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate; costed at two
// instructions since the constant may need materializing first.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14112 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate; costed at two
// instructions since the constant may need materializing first.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14168 
// Unsigned long compare, register-register.  Same encodings as the
// signed forms; rFlagsRegU directs consumers to unsigned conditions.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate; costed at two
// instructions since the constant may need materializing first.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14224 
// Pointer compare, register-register (unsigned flags).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14252 
// Pointer null test: compare op1 against zero.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer (narrow oop) null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14280 
14281 // FP comparisons
14282 //
14283 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14284 // using normal cmpOp. See declaration of rFlagsReg for details.
14285 
// Float compare, register-register (fcmps sets the flags in cr).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 (fcmps immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14342 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or the
// operands are unordered, 0 if equal, +1 if greater.
// Fixes: the format string was missing the closing ')' after "eq", and
// the encode block declared and bound a 'done' label that no branch
// ever referenced (dead code, removed).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14370 
// Three-way double compare (CmpD3): dst = -1 if src1 < src2 or the
// operands are unordered, 0 if equal, +1 if greater.
// Fixes: missing ')' in the format string and removal of an unused
// 'done' label that was declared and bound but never branched to.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14397 
// Three-way float compare against 0.0 (CmpF3 with constant zero):
// dst = -1 if src1 < 0.0 or unordered, 0 if equal, +1 if greater.
// Fixes: missing ')' in the format string and removal of an unused
// 'done' label that was declared and bound but never branched to.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14424 
// Three-way double compare against 0.0 (CmpD3 with constant zero):
// dst = -1 if src1 < 0.0 or unordered, 0 if equal, +1 if greater.
// Fixes: missing ')' in the format string and removal of an unused
// 'done' label that was declared and bound but never branched to.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14450 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw gives 0/1, then the
// subtraction from zr negates it into 0/-1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: arithmetic shift right by 31 replicates the
// sign bit, giving -1 for negative src and 0 otherwise in one insn.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14487 
14488 // ============================================================================
14489 // Max and Min
14490 
// Conditional-select helper (no match rule: only reachable through the
// expand in minI_rReg below).  dst = (flags say LT) ? src1 : src2.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI expands to a compare followed by a LT conditional select.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
// FROM HERE

// Conditional-select helper (no match rule: only reachable through the
// expand in maxI_rReg below).  dst = (flags say GT) ? src1 : src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI expands to a compare followed by a GT conditional select.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
14549 
14550 // ============================================================================
14551 // Branch Instructions
14552 
14553 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14609 
14610 // Make use of CBZ and CBNZ.  These instructions, as well as being
14611 // shorter than (cmp; branch), have the additional benefit of not
14612 // killing the flags.
14613 
// Int compare-with-zero branch: EQ maps to cbzw, NE to cbnzw.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long compare-with-zero branch: EQ maps to cbz, NE to cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-test branch: EQ maps to cbz, NE to cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop null-test branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null test of a decoded narrow oop: the decoded pointer is null iff
// the narrow oop is zero, so test the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare-with-zero branch.  Against zero, EQ and LS
// (unsigned <=) both hold exactly when op1 is zero, so they map to
// cbzw; the remaining conditions map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare-with-zero branch; same condition mapping as
// the int form above, using the 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14732 
14733 // Test bit and Branch
14734 
14735 // Patterns for short (< 32KiB) variants
// Sign test via tbz/tbnz on the sign bit: (x < 0) iff bit 63 is set,
// so LT becomes a test-bit-not-zero (NE) and GE a test-bit-zero (EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test: as above, testing bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test branch: (op1 & pow2) ==/!= 0 becomes tbz/tbnz on
// the bit's index (predicate guarantees op2 is a power of two).
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int single-bit test branch; same scheme as the long form.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14801 
14802 // And far variants
// Far variant: branch on the sign of a long.  (If lt/ge (CmpL op1 0))
// becomes a test of sign bit 63: lt maps to NE (bit set -> negative),
// ge maps to EQ.  Uses the far-branch form for out-of-range targets.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if the sign bit is set (NE); GE -> if clear (EQ).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14817 
// Far variant: branch on the sign of an int — tests sign bit 31.
// lt maps to NE (bit set -> negative), ge maps to EQ; far-branch form.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if the sign bit is set (NE); GE -> if clear (EQ).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14832 
// Far variant of cmpL_branch_bit: single-bit test on a long with the
// far-branch form for targets beyond tbz/tbnz's short range.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  // Applies only when the AND mask has exactly one bit set.
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);  // index of the single set bit
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14848 
// Far variant of cmpI_branch_bit: single-bit test on an int with the
// far-branch form for targets beyond tbz/tbnz's short range.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  // Applies only when the AND mask has exactly one bit set.
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);  // index of the single set bit
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14864 
14865 // Test bits
14866 
// Set flags from (long) op1 & op2 compared against zero, where op2 is
// encodable as a 64-bit logical immediate: emits a single tst.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  // Only when the mask can be encoded as an AArch64 logical immediate.
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14879 
// Set flags from (int) op1 & op2 compared against zero, where op2 is
// encodable as a 32-bit logical immediate: emits a single tstw.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  // Only when the mask can be encoded as an AArch64 logical immediate.
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // "tstw" (not "tst"): this is what the encoding actually emits, and it
  // matches the format used by cmpI_and_reg below.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14892 
// Set flags from (long) op1 & op2 (register mask) compared against zero.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14903 
// Set flags from (int) op1 & op2 (register mask) compared against zero.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14914 
14915 
14916 // Conditional Far Branch
14917 // Conditional Far Branch Unsigned
14918 // TODO: fixme
14919 
14920 // counted loop end branch near
// Conditional branch closing a counted loop (signed condition codes).
// Near form only; see the TODO below for the missing far variants.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14936 
14937 // counted loop end branch near Unsigned
// Conditional branch closing a counted loop (unsigned condition codes).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14953 
14954 // counted loop end branch far
14955 // counted loop end branch far unsigned
14956 // TODO: fixme
14957 
14958 // ============================================================================
14959 // inlined locking and unlocking
14960 
// Inlined monitor enter (fast path).  Sets the flags register so the
// following branch can fall into the slow path on contention; tmp and
// tmp2 are scratch registers clobbered by the lock sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14975 
// Inlined monitor exit (fast path); counterpart of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14988 
14989 
14990 // ============================================================================
14991 // Safepoint Instructions
14992 
14993 // TODO
14994 // provide a near and far version of this code
14995 
// Safepoint poll: load from the polling page so the VM can trap the
// thread for GC by protecting that page.  Kills the flags register.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    // Emits the poll load plus the relocation record the VM needs to
    // recognize this site as a safepoint.
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15009 
15010 
15011 // ============================================================================
15012 // Procedure Call/Return Instructions
15013 
15014 // Call Java Static Instruction
15015 
// Direct call to a statically-bound Java method, followed by the
// standard call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15031 
15032 // TO HERE
15033 
15034 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache call), followed by the
// standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  // Continuation aligned with the opening parenthesis, matching the
  // static-call instruct above (was off by one space).
  ins_encode( aarch64_enc_java_dynamic_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15050 
15051 // Call Runtime Instruction
15052 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15067 
// Call Runtime Leaf Instruction
15069 
// Call to a runtime leaf routine (no safepoint expected in the callee);
// uses the same java-to-runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15084 
// Call Runtime Leaf (no floating point) Instruction
15086 
// Call to a runtime leaf routine that does not use floating point;
// same encoding as the other runtime-call instructs.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15101 
15102 // Tail Call; Jump from runtime stub to Java code.
15103 // Also known as an 'interprocedural jump'.
15104 // Target of jump will eventually return to caller.
15105 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub to Java code; the called
// method's oop travels in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15118 
// Tail jump carrying an exception oop in r0 (removes the return
// address; see the TailCall comment above).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15131 
15132 // Create exception oop: created by stack-crawling runtime code.
15133 // Created exception is now available to this handler, and is setup
15134 // just prior to jumping to this handler. No code emitted.
15135 // TODO check
15136 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the exception oop produced by stack-crawling runtime code to r0.
// Purely a register-allocation artifact: emits no code (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15149 
15150 // Rethrow exception: The exception oop will come in the first
15151 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: the exception oop arrives in the first argument position;
// JUMP (not call) to the shared rethrow stub.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15162 
15163 
15164 // Return Instruction
15165 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog node has already reloaded lr from the
// popped frame, so this is a bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15176 
15177 // Die now.
// Halt: emit a trapping instruction so execution can never silently
// fall through a path the compiler proved unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15192 
15193 // ============================================================================
15194 // Partial Subtype Check
15195 //
// Search the sub-klass's secondary-supers array for a match with the
// superklass.  Set a hidden
15197 // internal cache on a hit (cache is checked with exposed code in
15198 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15199 // encoding ALSO sets flags.
15200 
// Partial subtype check returning a result register: nonzero on miss,
// zero on hit.  Register bindings (r0/r2/r4/r5) match the shared
// subtype-check stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15215 
// Partial subtype check fused with a compare-against-zero: only the
// flags result is consumed, so the result register need not be zeroed
// on a hit (opcode 0x0), unlike partialSubtypeCheck above.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15230 
// String compare, both operands UTF-16 (UU encoding); no vector temps
// needed, so fnoreg is passed for the three FP scratch registers.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Format lists both killed temps, consistent with the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15248 
// String compare, both operands Latin-1 (LL encoding); no vector temps
// needed, so fnoreg is passed for the three FP scratch registers.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Format lists both killed temps, consistent with the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15265 
// String compare, first operand UTF-16, second Latin-1 (UL encoding);
// the mixed-width comparison uses three vector scratch registers.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15285 
// String compare, first operand Latin-1, second UTF-16 (LU encoding);
// the mixed-width comparison uses three vector scratch registers.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15305 
// String.indexOf, haystack and needle both UTF-16 (UU), needle length
// not known at compile time (int_cnt2 = -1).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    // -1 tells the stub the needle length is dynamic (in cnt2).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15326 
// String.indexOf, haystack and needle both Latin-1 (LL), needle length
// not known at compile time (int_cnt2 = -1).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    // -1 tells the stub the needle length is dynamic (in cnt2).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15347 
// String.indexOf, UTF-16 haystack / Latin-1 needle (UL), needle length
// not known at compile time (int_cnt2 = -1).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    // -1 tells the stub the needle length is dynamic (in cnt2).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15368 
// String.indexOf (UU) with a small compile-time-constant needle length
// (immI_le_4): the constant is passed to the stub and cnt2 is zr, so
// fewer temps are needed than in the dynamic-length variant.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;  // needle length, known at compile time
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15389 
// String.indexOf (LL) with a small compile-time-constant needle length
// (immI_le_4); see string_indexof_conUU.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;  // needle length, known at compile time
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15410 
// String.indexOf (UL) with a compile-time-constant needle length of
// exactly one (immI_1); see string_indexof_conUU.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;  // needle length, known at compile time
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15431 
// indexOf of a single char within a UTF-16 string.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15449 
// String equality, Latin-1 (LL): element size 1 byte.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Final argument is the element size in bytes (1 = Latin-1).
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15465 
// String equality, UTF-16 (UU): element size 2 bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Final argument is the element size in bytes (2 = UTF-16).
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15481 
// Arrays.equals on byte[] (LL encoding, element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Final argument is the element size in bytes.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  // Closing delimiter re-indented to two spaces to match the
  // surrounding instructs (was indented four spaces).
  ins_pipe(pipe_class_memory);
%}
15498 
// Arrays.equals on char[] (UU encoding, element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Final argument is the element size in bytes.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15515 
// Test whether a byte[] contains any negative (i.e. non-ASCII) bytes.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15526 
15527 // fast char[] to byte[] compression
// Compress a char[] to a byte[] using four vector temps; the result
// register reports the outcome of the compression.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // NOTE(review): the KILL list below names R4, which does not appear in
  // effect() (src=R2, dst=R1, len=R3) — verify against the stub's clobbers.
  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15545 
15546 // fast byte[] to char[] inflation
// Inflate a byte[] to a char[]; no value result (Universe dummy), only
// the copy side effect.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // KILL list completed: tmp3 and tmp4 are also TEMPs per effect().
  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15560 
15561 // encode char[] to byte[] in ISO_8859_1
// Encode a char[] into an ISO-8859-1 byte[]; result reports how many
// characters were encoded.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15579 
15580 // ============================================================================
15581 // This name is KNOWN by the ADLC and cannot be changed.
15582 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15583 // for this guy.
// Bind ThreadLocal to the dedicated thread register; emits no code
// (size 0) since the current-thread pointer already lives there.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15598 
15599 // ====================VECTOR INSTRUCTIONS=====================================
15600 
15601 // Load vector (32 bits)
// Load a 32-bit vector into the low half of a D register (ldr s-form).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15611 
15612 // Load vector (64 bits)
// Load a 64-bit vector into a D register (ldr d-form).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15622 
15623 // Load Vector (128 bits)
// Load a 128-bit vector into a Q register (ldr q-form).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15633 
15634 // Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  // Selected only for 4-byte vector stores; stores the low 32 bits of
  // the D register via the scalar S-register store form (strs).
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15644 
15645 // Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  // Selected only for 8-byte vector stores; stores a full D register.
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15655 
15656 // Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  // Selected only for 16-byte vector stores; stores a full Q register.
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15666 
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  // Broadcast a scalar byte to all lanes.  Also handles 4B vectors:
  // both lengths fit in a 64-bit D register and the extra lanes of a
  // 4B vector are simply ignored by consumers.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15679 
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  // Broadcast a scalar byte to all 16 lanes of a Q register.
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15691 
instruct replicate8B_imm(vecD dst, immI con)
%{
  // Broadcast an immediate byte; the constant is masked to its low
  // 8 bits before being materialized with a vector move.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15704 
instruct replicate16B_imm(vecX dst, immI con)
%{
  // 128-bit variant of replicate8B_imm: immediate masked to 8 bits.
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15716 
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  // Broadcast a scalar short to 4 half-word lanes; also covers 2S
  // vectors (both lengths fit in a D register).
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15729 
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  // Broadcast a scalar short to all 8 half-word lanes of a Q register.
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15741 
instruct replicate4S_imm(vecD dst, immI con)
%{
  // Broadcast an immediate short; the constant is masked to its low
  // 16 bits before being materialized with a vector move.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15754 
instruct replicate8S_imm(vecX dst, immI con)
%{
  // 128-bit variant of replicate4S_imm: immediate masked to 16 bits.
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15766 
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  // Broadcast a scalar int to both 32-bit lanes of a D register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15778 
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  // Broadcast a scalar int to all four 32-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15790 
instruct replicate2I_imm(vecD dst, immI con)
%{
  // Broadcast an immediate int; the full 32-bit constant is passed
  // through unmasked (S lanes hold the whole value).
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15802 
instruct replicate4I_imm(vecX dst, immI con)
%{
  // 128-bit variant of replicate2I_imm.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15814 
instruct replicate2L(vecX dst, iRegL src)
%{
  // Broadcast a scalar long to both 64-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15826 
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  // Materialize an all-zero 128-bit vector by EOR-ing dst with itself,
  // avoiding any immediate move.
  // NOTE(review): despite the name this matches ReplicateI (not
  // ReplicateL) and the format says vector(4I) -- presumably a 2L zero
  // is represented as an int-typed zero replicate in the ideal graph;
  // confirm against the node shape before changing.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15840 
instruct replicate2F(vecD dst, vRegF src)
%{
  // Broadcast a scalar float (already in an FP register) to both
  // 32-bit lanes of a D register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15853 
instruct replicate4F(vecX dst, vRegF src)
%{
  // Broadcast a scalar float to all four 32-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15866 
instruct replicate2D(vecX dst, vRegD src)
%{
  // Broadcast a scalar double to both 64-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15879 
15880 // ====================REDUCTION ARITHMETIC====================================
15881 
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  // Add-reduce a 2-lane int vector into a scalar: extract both lanes
  // with umov into temporaries, then fold them into src1 with two
  // 32-bit adds.
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15900 
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  // Add-reduce a 4-lane int vector: addv folds all four lanes into
  // lane 0 of tmp, then the result is extracted and added to src1.
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15918 
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  // Multiply-reduce a 2-lane int vector: extract each lane with umov
  // and fold it into the running scalar product with mul.
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // The debug listing previously ended with a stray "\n\t" separator
  // after the final instruction; it is removed here.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15937 
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  // Multiply-reduce a 4-lane int vector: ins copies the high D half of
  // src2 onto the low half of tmp, a 2S mulv combines lanes pairwise
  // (0*2, 1*3), then the two partial products are extracted and folded
  // into src1 with scalar muls.
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // The debug listing previously ended with a stray "\n\t" separator
  // after the final instruction; it is removed here.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15962 
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  // Add-reduce a 2-lane float vector.  Lanes are accumulated one at a
  // time with scalar fadds (lane 1 is moved to lane 0 of tmp via ins),
  // keeping a fixed left-to-right evaluation order.
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15982 
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  // Add-reduce a 4-lane float vector.  Each lane (1..3) is moved to
  // lane 0 of tmp via ins and added with scalar fadds, preserving a
  // fixed left-to-right accumulation order.
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16014 
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  // Multiply-reduce a 2-lane float vector: scalar fmuls per lane, with
  // lane 1 moved to lane 0 of tmp via ins.
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Debug-listing tag fixed: this is a 2F multiply reduction (the text
  // previously said "add reduction4f").
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16034 
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  // Multiply-reduce a 4-lane float vector: each lane (1..3) is moved
  // to lane 0 of tmp via ins and folded in with scalar fmuls.
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Debug-listing tag fixed: this is a multiply reduction (the text
  // previously said "add reduction4f").
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16066 
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  // Add-reduce a 2-lane double vector: scalar faddd per lane, with
  // lane 1 moved to lane 0 of tmp via ins.
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16086 
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  // Multiply-reduce a 2-lane double vector: scalar fmuld per lane,
  // with lane 1 moved to lane 0 of tmp via ins.
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Debug-listing tag fixed: this is a multiply reduction (the text
  // previously said "add reduction2d").
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16106 
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  // Max-reduce a 2-lane float vector with scalar fmaxs per lane; the
  // element-type predicate disambiguates MaxReductionV by lane type.
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16122 
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  // Max-reduce a 4-lane float vector: fmaxv folds all four lanes in
  // one instruction, then src1 is folded in with a scalar fmaxs.
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16136 
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  // Max-reduce a 2-lane double vector with scalar fmaxd per lane
  // (no fmaxv form for D lanes is used here).
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16152 
instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  // Min-reduce a 2-lane float vector; mirror image of reduce_max2F
  // using fmins.
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16168 
instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
  // Min-reduce a 4-lane float vector: fminv folds all four lanes,
  // then src1 is folded in with a scalar fmins.
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $src2\n\t"
            "fmins $dst, $dst, $src1\t min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16182 
instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  // Min-reduce a 2-lane double vector; mirror image of reduce_max2D
  // using fmind.
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16198 
16199 // ====================VECTOR ARITHMETIC=======================================
16200 
16201 // --------------------------------- ADD --------------------------------------
16202 
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise byte add; handles both 4B and 8B vectors in a D register.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16217 
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise byte add over a full 16B Q register.
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16231 
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise short add; handles both 2S and 4S vectors in a D register.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16246 
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise short add over a full 8H Q register.
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16260 
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise int add over two 32-bit lanes of a D register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16274 
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise int add over four 32-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16288 
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise long add over two 64-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16302 
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise float add over two 32-bit lanes of a D register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16316 
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise float add over four 32-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16330 
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise double add over two 64-bit lanes of a Q register.
  // NOTE(review): no length predicate here, unlike the sibling rules --
  // presumably 2D is the only AddVD shape that can occur; confirm.
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16343 
16344 // --------------------------------- SUB --------------------------------------
16345 
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise byte subtract; handles both 4B and 8B vectors.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16360 
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise byte subtract over a full 16B Q register.
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16374 
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise short subtract; handles both 2S and 4S vectors.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16389 
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise short subtract over a full 8H Q register.
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16403 
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise int subtract over two 32-bit lanes of a D register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16417 
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise int subtract over four 32-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16431 
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise long subtract over two 64-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16445 
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  // Lane-wise float subtract over two 32-bit lanes of a D register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16459 
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  // Lane-wise float subtract over four 32-bit lanes of a Q register.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16473 
// Vector subtract, two doubles (128-bit reg): dst = src1 - src2, lane-wise.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16487 
16488 // --------------------------------- MUL --------------------------------------
16489 
// Vector multiply, shorts in a 64-bit reg: dst = src1 * src2, lane-wise.
// Predicate accepts length 2 or 4; the T4H arrangement covers both since
// unused upper lanes are simply ignored.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16504 
// Vector multiply, eight 16-bit shorts (128-bit reg): dst = src1 * src2, lane-wise.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16518 
// Vector multiply, two 32-bit ints (64-bit reg): dst = src1 * src2, lane-wise.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16532 
// Vector multiply, four 32-bit ints (128-bit reg): dst = src1 * src2, lane-wise.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16546 
// Vector multiply, two floats (64-bit reg): dst = src1 * src2, lane-wise.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16560 
// Vector multiply, four floats (128-bit reg): dst = src1 * src2, lane-wise.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16574 
// Vector multiply, two doubles (128-bit reg): dst = src1 * src2, lane-wise.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16588 
16589 // --------------------------------- MLA --------------------------------------
16590 
// Vector multiply-accumulate, shorts (64-bit reg): dst += src1 * src2.
// dst appears on both sides of the match rule, so it is read-modify-write.
// Length 2 or 4 both use the T4H arrangement.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16605 
// Vector multiply-accumulate, eight shorts (128-bit reg): dst += src1 * src2.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16619 
// Vector multiply-accumulate, two ints (64-bit reg): dst += src1 * src2.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16633 
// Vector multiply-accumulate, four ints (128-bit reg): dst += src1 * src2.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16647 
// Fused multiply-add, two floats (64-bit reg): dst = dst + src1 * src2.
// Only matched when UseFMA is enabled, since fmla performs a single
// rounding (FmaVF semantics), unlike separate mul + add.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16661 
// Fused multiply-add, four floats (128-bit reg): dst = dst + src1 * src2.
// Gated on UseFMA; fmla rounds once, matching FmaVF semantics.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16675 
// Fused multiply-add, two doubles (128-bit reg): dst = dst + src1 * src2.
// Gated on UseFMA; fmla rounds once, matching FmaVD semantics.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16689 
16690 // --------------------------------- MLS --------------------------------------
16691 
// Vector multiply-subtract, shorts (64-bit reg): dst -= src1 * src2.
// dst is read-modify-write. Length 2 or 4 both use T4H.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16706 
// Vector multiply-subtract, eight shorts (128-bit reg): dst -= src1 * src2.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16720 
// Vector multiply-subtract, two ints (64-bit reg): dst -= src1 * src2.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16734 
// Vector multiply-subtract, four ints (128-bit reg): dst -= src1 * src2.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16748 
// Fused multiply-subtract, two floats (64-bit reg): dst = dst - src1 * src2.
// Two match rules cover both commuted negation forms of the FMA tree:
// dst + (-src1)*src2 and dst + src1*(-src2). Gated on UseFMA.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16763 
// Fused multiply-subtract, four floats (128-bit reg): dst = dst - src1 * src2.
// Matches either negated operand form; gated on UseFMA.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16778 
// Fused multiply-subtract, two doubles (128-bit reg): dst = dst - src1 * src2.
// Matches either negated operand form; gated on UseFMA.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16793 
16794 // --------------------------------- DIV --------------------------------------
16795 
// Vector divide, two floats (64-bit reg): dst = src1 / src2, lane-wise.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16809 
// Vector divide, four floats (128-bit reg): dst = src1 / src2, lane-wise.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16823 
// Vector divide, two doubles (128-bit reg): dst = src1 / src2, lane-wise.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16837 
16838 // --------------------------------- SQRT -------------------------------------
16839 
// Vector square root, two doubles (128-bit reg): dst = sqrt(src), lane-wise.
// No explicit ins_cost: the default cost applies.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16851 
16852 // --------------------------------- ABS --------------------------------------
16853 
// Vector absolute value, two floats (64-bit reg): dst = |src|, lane-wise.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
16866 
// Vector absolute value, four floats (128-bit reg): dst = |src|, lane-wise.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16879 
// Vector absolute value, two doubles (128-bit reg): dst = |src|, lane-wise.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16892 
16893 // --------------------------------- NEG --------------------------------------
16894 
// Vector negate, two floats (64-bit reg): dst = -src, lane-wise.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
16907 
// Vector negate, four floats (128-bit reg): dst = -src, lane-wise.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16920 
// Vector negate, two doubles (128-bit reg): dst = -src, lane-wise.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16933 
16934 // --------------------------------- AND --------------------------------------
16935 
// Vector bitwise AND, 64-bit reg: dst = src1 & src2.
// Element type is irrelevant for bitwise ops, so the predicate keys on
// total byte length (4 or 8 bytes both fit the D register).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16950 
// Vector bitwise AND, 128-bit reg: dst = src1 & src2 (16 bytes).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16964 
16965 // --------------------------------- OR ---------------------------------------
16966 
// Vector bitwise OR, 64-bit reg: dst = src1 | src2.
// Bitwise, so the predicate keys on total byte length (4 or 8 bytes).
// Fix: the format string previously read "and" although the emitted
// instruction is orr (cf. vor16B below, which correctly says "orr");
// this only affects disassembly/PrintOptoAssembly output, not codegen.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16981 
// Vector bitwise OR, 128-bit reg: dst = src1 | src2 (16 bytes).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16995 
16996 // --------------------------------- XOR --------------------------------------
16997 
// Vector bitwise XOR, 64-bit reg: dst = src1 ^ src2 (emitted as AArch64 eor).
// Bitwise, so the predicate keys on total byte length (4 or 8 bytes).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17012 
// Vector bitwise XOR, 128-bit reg: dst = src1 ^ src2 (emitted as AArch64 eor).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17026 
17027 // ------------------------------ Shift ---------------------------------------
// Materialize a vector shift count (64-bit reg) by broadcasting the
// scalar count from a GP register into every byte lane with dup.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
17038 
// Materialize a vector shift count (128-bit reg) by broadcasting the
// scalar count from a GP register into every byte lane with dup.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17049 
// Vector left shift of bytes by a vector count (64-bit reg), via sshl
// (with positive counts sshl shifts left). Length 4 or 8 both use T8B.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17063 
// Vector left shift of 16 bytes by a vector count (128-bit reg) via sshl.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17076 
17077 // Right shifts with vector shift count on aarch64 SIMD are implemented
17078 // as left shift by negative shift count.
17079 // There are two cases for vector shift count.
17080 //
17081 // Case 1: The vector shift count is from replication.
17082 //        |            |
17083 //    LoadVector  RShiftCntV
17084 //        |       /
17085 //     RShiftVI
17086 // Note: In inner loop, multiple neg instructions are used, which can be
17087 // moved to outer loop and merge into one neg instruction.
17088 //
17089 // Case 2: The vector shift count is from loading.
17090 // This case isn't supported by middle-end now. But it's supported by
17091 // panama/vectorIntrinsics(JEP 338: Vector API).
17092 //        |            |
17093 //    LoadVector  LoadVector
17094 //        |       /
17095 //     RShiftVI
17096 //
17097 
// Vector arithmetic right shift of bytes by a vector count (64-bit reg).
// AArch64 has no right-shift-by-vector, so negate the count into tmp and
// do an sshl (left shift by negative count == arithmetic right shift).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17115 
// Vector arithmetic right shift of 16 bytes by a vector count (128-bit reg):
// negate the count, then sshl (left shift by negative count).
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17132 
// Vector logical (unsigned) right shift of bytes by a vector count
// (64-bit reg): negate the count, then ushl (left shift by negative count).
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17150 
// Vector logical (unsigned) right shift of 16 bytes by a vector count
// (128-bit reg): negate the count, then ushl.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17167 
// Vector left shift of bytes by an immediate (64-bit reg).
// Java shift semantics: shifting a byte left by >= 8 yields 0, but the
// hardware shl immediate only encodes 0..7, so emit eor(dst,src,src) to
// produce an all-zero result in that case.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17187 
// Vector left shift of 16 bytes by an immediate (128-bit reg).
// Shift >= 8 zeroes every byte lane (via eor dst,src,src) since the
// hardware immediate only encodes 0..7.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17206 
// Vector arithmetic right shift of bytes by an immediate (64-bit reg).
// A shift of >= 8 is clamped to 7: arithmetically shifting a byte by 7
// already replicates the sign bit into every bit, matching Java semantics.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17221 
// Vector arithmetic right shift of 16 bytes by an immediate (128-bit reg).
// Shift >= 8 clamps to 7 (sign fill is already complete at 7).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17235 
// Vector logical right shift of bytes by an immediate (64-bit reg).
// Shift >= 8 yields all-zero lanes, emitted as eor(dst,src,src).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17255 
// Vector logical right shift of 16 bytes by an immediate (128-bit reg).
// Shift >= 8 yields all-zero lanes, emitted as eor(dst,src,src).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17274 
// Vector left shift of shorts by a vector count (64-bit reg) via sshl.
// Length 2 or 4 both use the T4H arrangement.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17288 
// Vector left shift of eight shorts by a vector count (128-bit reg) via sshl.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17301 
// Vector arithmetic right shift of shorts by a vector count (64-bit reg):
// negate the count into tmp, then sshl. The neg uses the T8B (byte)
// arrangement even though lanes are 16-bit: sshl only consults the low
// byte of each lane for the shift amount, and byte-wise negation produces
// the correct negated count in that low byte.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17319 
// Vector arithmetic right shift of eight shorts by a vector count
// (128-bit reg): byte-wise negate the count (sshl reads only the low
// byte of each lane), then sshl with the T8H arrangement.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17336 
// Vector logical right shift of shorts by a vector count (64-bit reg):
// byte-wise negate the count, then ushl (left shift by negative count).
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17354 
// Logical (unsigned) right shift of 8 short lanes by a per-lane
// variable count (128-bit form of vsrl4S).
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    // Byte-wise negate: USHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17371 
// Left shift of 2 or 4 short lanes by an immediate count.  Java masks
// the shift count to 0..31 (int semantics), which can exceed the
// 16-bit element width, so counts >= 16 must produce zero explicitly.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift count covers the whole element: result is zero
      // (eor of src with itself); SHL cannot encode counts >= 16.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17391 
// Left shift of 8 short lanes by an immediate count (128-bit form of
// vsll4S_imm); counts >= 16 zero the result.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Whole element shifted out: produce zero explicitly.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17410 
// Arithmetic right shift of 2 or 4 short lanes by an immediate count.
// Counts >= 16 are clamped to 15: shifting a 16-bit lane right
// arithmetically by 15 replicates the sign bit, matching Java
// semantics for over-wide signed shifts.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17425 
// Arithmetic right shift of 8 short lanes by an immediate count
// (128-bit form of vsra4S_imm); counts >= 16 clamp to 15 to
// replicate the sign bit.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17439 
// Logical right shift of 2 or 4 short lanes by an immediate count;
// counts >= 16 shift out the whole 16-bit element, so the result is
// zeroed explicitly (USHR cannot encode counts >= 16).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Whole element shifted out: produce zero explicitly.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17459 
// Logical right shift of 8 short lanes by an immediate count (128-bit
// form of vsrl4S_imm); counts >= 16 zero the result.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Whole element shifted out: produce zero explicitly.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17478 
// Left shift of 2 int lanes by a per-lane variable count.  SSHL with
// positive per-lane counts performs a left shift.
// NOTE(review): assumes the ideal graph guarantees counts in 0..31
// for 32-bit lanes before this rule matches — confirm upstream.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17491 
// Left shift of 4 int lanes by a per-lane variable count (128-bit
// form of vsll2I).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17504 
// Arithmetic right shift of 2 int lanes by a per-lane variable count:
// negate the count vector, then SSHL shifts right for negative
// per-lane counts.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    // Byte-wise negate: SSHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17521 
// Arithmetic right shift of 4 int lanes by a per-lane variable count
// (128-bit form of vsra2I).
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    // Byte-wise negate: SSHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17538 
// Logical right shift of 2 int lanes by a per-lane variable count:
// negate the count vector, then USHL shifts right for negative
// per-lane counts.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    // Byte-wise negate: USHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17555 
// Logical right shift of 4 int lanes by a per-lane variable count
// (128-bit form of vsrl2I).
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    // Byte-wise negate: USHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17572 
// Left shift of 2 int lanes by an immediate count.  No clamp needed:
// Java masks int shift counts to 0..31, which SHL on 32-bit lanes
// can encode directly.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17585 
// Left shift of 4 int lanes by an immediate count (128-bit form of
// vsll2I_imm).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17598 
// Arithmetic right shift of 2 int lanes by an immediate count
// (Java-masked to 0..31, directly encodable by SSHR).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17611 
// Arithmetic right shift of 4 int lanes by an immediate count
// (128-bit form of vsra2I_imm).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17624 
// Logical right shift of 2 int lanes by an immediate count
// (Java-masked to 0..31, directly encodable by USHR).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17637 
// Logical right shift of 4 int lanes by an immediate count (128-bit
// form of vsrl2I_imm).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17650 
// Left shift of 2 long lanes by a per-lane variable count (SSHL with
// positive per-lane counts shifts left).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17663 
// Arithmetic right shift of 2 long lanes by a per-lane variable
// count: negate the count vector, then SSHL shifts right for negative
// per-lane counts.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    // Byte-wise negate: SSHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17680 
// Logical right shift of 2 long lanes by a per-lane variable count:
// negate the count vector, then USHL shifts right for negative
// per-lane counts.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    // Byte-wise negate: USHL only consumes the low byte of each lane.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17697 
// Left shift of 2 long lanes by an immediate count.  No clamp needed:
// Java masks long shift counts to 0..63, which SHL on 64-bit lanes
// can encode directly.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17710 
// Arithmetic right shift of 2 long lanes by an immediate count
// (Java-masked to 0..63, directly encodable by SSHR).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17723 
// Logical right shift of 2 long lanes by an immediate count
// (Java-masked to 0..63, directly encodable by USHR).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17736 
// Per-lane maximum of 2 float lanes via FMAX.
// NOTE(review): relies on AArch64 FMAX NaN/-0.0 handling matching
// Java Math.max semantics — confirm against the Arm ARM.
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17750 
// Per-lane maximum of 4 float lanes via FMAX (128-bit form of
// vmax2F).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17764 
// Per-lane maximum of 2 double lanes via FMAX.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17778 
// Per-lane minimum of 2 float lanes via FMIN.
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17792 
// Per-lane minimum of 4 float lanes via FMIN (128-bit form of
// vmin2F).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17806 
// Per-lane minimum of 2 double lanes via FMIN.
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17820 
17821 //----------PEEPHOLE RULES-----------------------------------------------------
17822 // These must follow all instruction definitions as they use the names
17823 // defined in the instructions definitions.
17824 //
17825 // peepmatch ( root_instr_name [preceding_instruction]* );
17826 //
17827 // peepconstraint %{
17828 // (instruction_number.operand_name relational_op instruction_number.operand_name
17829 //  [, ...] );
17830 // // instruction numbers are zero-based using left to right order in peepmatch
17831 //
17832 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17833 // // provide an instruction_number.operand_name for each operand that appears
17834 // // in the replacement instruction's match rule
17835 //
17836 // ---------VM FLAGS---------------------------------------------------------
17837 //
17838 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17839 //
17840 // Each peephole rule is given an identifying number starting with zero and
17841 // increasing by one in the order seen by the parser.  An individual peephole
17842 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17843 // on the command-line.
17844 //
17845 // ---------CURRENT LIMITATIONS----------------------------------------------
17846 //
17847 // Only match adjacent instructions in same basic block
17848 // Only equality constraints
17849 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17850 // Only one replacement instruction
17851 //
17852 // ---------EXAMPLE----------------------------------------------------------
17853 //
17854 // // pertinent parts of existing instructions in architecture description
17855 // instruct movI(iRegINoSp dst, iRegI src)
17856 // %{
17857 //   match(Set dst (CopyI src));
17858 // %}
17859 //
17860 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17861 // %{
17862 //   match(Set dst (AddI dst src));
17863 //   effect(KILL cr);
17864 // %}
17865 //
17866 // // Change (inc mov) to lea
17867 // peephole %{
//   // increment preceded by register-register move
17869 //   peepmatch ( incI_iReg movI );
17870 //   // require that the destination register of the increment
17871 //   // match the destination register of the move
17872 //   peepconstraint ( 0.dst == 1.dst );
17873 //   // construct a replacement instruction that sets
17874 //   // the destination to ( move's source register + one )
17875 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17876 // %}
17877 //
17878 
17879 // Implementation no longer uses movX instructions since
17880 // machine-independent system no longer uses CopyX nodes.
17881 //
17882 // peephole
17883 // %{
17884 //   peepmatch (incI_iReg movI);
17885 //   peepconstraint (0.dst == 1.dst);
17886 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17887 // %}
17888 
17889 // peephole
17890 // %{
17891 //   peepmatch (decI_iReg movI);
17892 //   peepconstraint (0.dst == 1.dst);
17893 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17894 // %}
17895 
17896 // peephole
17897 // %{
17898 //   peepmatch (addI_iReg_imm movI);
17899 //   peepconstraint (0.dst == 1.dst);
17900 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17901 // %}
17902 
17903 // peephole
17904 // %{
17905 //   peepmatch (incL_iReg movL);
17906 //   peepconstraint (0.dst == 1.dst);
17907 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17908 // %}
17909 
17910 // peephole
17911 // %{
17912 //   peepmatch (decL_iReg movL);
17913 //   peepconstraint (0.dst == 1.dst);
17914 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17915 // %}
17916 
17917 // peephole
17918 // %{
17919 //   peepmatch (addL_iReg_imm movL);
17920 //   peepconstraint (0.dst == 1.dst);
17921 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17922 // %}
17923 
17924 // peephole
17925 // %{
17926 //   peepmatch (addP_iReg_imm movP);
17927 //   peepconstraint (0.dst == 1.dst);
17928 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17929 // %}
17930 
17931 // // Change load of spilled value to only a spill
17932 // instruct storeI(memory mem, iRegI src)
17933 // %{
17934 //   match(Set mem (StoreI mem src));
17935 // %}
17936 //
17937 // instruct loadI(iRegINoSp dst, memory mem)
17938 // %{
17939 //   match(Set dst (LoadI mem));
17940 // %}
17941 //
17942 
17943 //----------SMARTSPILL RULES---------------------------------------------------
17944 // These must follow all instruction definitions as they use the names
17945 // defined in the instructions definitions.
17946 
17947 // Local Variables:
17948 // mode: c++
17949 // End: