1 //
   2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// as regards Java usage. We don't use any callee-save registers
// because that makes it difficult to de-optimise a frame (see comment
// in the x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Integer registers r0-r31. Each 64-bit register is described to the
// allocator as a real lower 32-bit half (Rn) plus a virtual upper half
// (Rn_H), as explained above. r8 and r9 are deliberately not defined
// here: they are kept invisible to the allocator so they can be used as
// scratch registers (see note above).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are save-on-entry under the C convention but save-on-call
// for Java (see note above about avoiding callee-save registers).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee-save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register vn is described to the allocator as four
  // 32-bit slices of the 128-bit register: Vn (bits 0-31), Vn_H
  // (bits 32-63), Vn_J (bits 64-95) and Vn_K (bits 96-127).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are declared SOC here even though the platform ABI treats
  // them as callee-save (see note above).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as an
// instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for integer registers: volatile scratch registers
// first, then argument registers, then the SOE group, with the
// non-allocatable registers last (per the priority note above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

// Allocation order for FP/SIMD registers: the v16-v31 scratch group
// first, then the argument registers v0-v7, then v8-v15 (callee-save
// under the platform ABI).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// The condition-flag pseudo-register lives in its own chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit general purpose registers
// (lower halves only; the Rn_H virtual halves are not members).
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);


// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// NOTE(review): the mask is built in C++ — presumably derived from
// all_reg32 in the accompanying source block; confirm there.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton classes pin an operand to one specific register for
// instructions/calling sequences that require it.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
 493 
// Class for all 64 bit general purpose registers
// (each register is listed with its virtual upper half).
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all long integer registers (including SP)
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
// NOTE(review): R27 (heapbase) and R29 (fp) are not listed here even
// though chunk0 groups them with the non-allocatable registers —
// presumably their allocatability is decided dynamically elsewhere;
// confirm against the C++ source block.
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);

// Class for all non-special integer registers
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
 556 
// Singleton 64-bit classes: each pins an operand to one specific
// register (e.g. for fixed calling sequences or dedicated VM uses).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
 636 
// Class for all float registers
// (single-precision: only the low 32-bit slice of each vn).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 64bit vector registers
// (same 64-bit slice pair per register as double_reg).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
// (all four 32-bit slices of each vn).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 782 
// Singleton FP/SIMD classes for pinning an operand to one specific
// vector register.
// NOTE(review): the comments say "128 bit register" but each class
// lists only the lower two 32-bit slices (Vn, Vn_H) — presumably
// intentional given how these operands are spilled; confirm.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 945 
 946 %}
 947 
 948 //----------DEFINITION BLOCK---------------------------------------------------
 949 // Define name --> value mappings to inform the ADLC of an integer valued name
 950 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 951 // Format:
 952 //        int_def  <name>         ( <int_value>, <expression>);
 953 // Generated Code in ad_<arch>.hpp
 954 //        #define  <name>   (<expression>)
 955 //        // value == <int_value>
 956 // Generated code in ad_<arch>.cpp adlc_verification()
 957 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 958 //
 959 
 960 // we follow the ppc-aix port in using a simple cost model which ranks
 961 // register operations as cheap, memory ops as more expensive and
 962 // branches as most expensive. the first two have a low as well as a
 963 // normal cost. huge cost appears to be a way of saying don't do
 964 // something
 965 
// Cost-model constants consumed by the instruction rules. Each int_def
// supplies the literal value plus an expression the generated
// adlc_verification() checks against it.
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 973 
 974 
 975 //----------SOURCE BLOCK-------------------------------------------------------
 976 // This is a block of C++ code which provides values, functions, and
 977 // definitions necessary in the rest of the architecture description
 978 
 979 source_hpp %{
 980 
 981 #include "asm/macroAssembler.hpp"
 982 #include "gc/shared/cardTable.hpp"
 983 #include "gc/shared/cardTableBarrierSet.hpp"
 984 #include "gc/shared/collectedHeap.hpp"
 985 #include "opto/addnode.hpp"
 986 #include "opto/convertnode.hpp"
 987 
 988 extern RegMask _ANY_REG32_mask;
 989 extern RegMask _ANY_REG_mask;
 990 extern RegMask _PTR_REG_mask;
 991 extern RegMask _NO_SPECIAL_REG32_mask;
 992 extern RegMask _NO_SPECIAL_REG_mask;
 993 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 994 
// Platform hooks queried by Compile::shorten_branches. AArch64 does
// not use call trampoline stubs, so both queries return zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1012 
// Sizing and emission hooks for the exception and deopt handler stubs.
// The emit_* methods are implemented later in this ad file.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is just a far branch to the shared handler.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1029 
// Platform-dependent node flags; AArch64 adds none beyond the shared
// set, so _last_flag simply aliases the generic value.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
1036 
1037  bool is_CAS(int opcode, bool maybe_volatile);
1038 
1039   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1040 
1041   bool unnecessary_acquire(const Node *barrier);
1042   bool needs_acquiring_load(const Node *load);
1043 
1044   // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1045 
1046   bool unnecessary_release(const Node *barrier);
1047   bool unnecessary_volatile(const Node *barrier);
1048   bool needs_releasing_store(const Node *store);
1049 
1050   // predicate controlling translation of CompareAndSwapX
1051   bool needs_acquiring_load_exclusive(const Node *load);
1052 
1053   // predicate controlling addressing modes
1054   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1055 %}
1056 
1057 source %{
1058 
  // Derived RegMask with conditionally allocatable registers

  // No platform-specific analysis of mach nodes is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Mach nodes require no special code alignment.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // Mach nodes never need padding on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
1071 
  // Definitions of the masks declared extern in the source_hpp block;
  // their contents are computed at VM start by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;

  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // any 32-bit register except the stack pointer
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // start from everything, then subtract the registers the allocator
    // must never hand out
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on, compressed klass
    // pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
1115 
  // Optimization of volatile gets and puts
1117   // -------------------------------------
1118   //
1119   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1120   // use to implement volatile reads and writes. For a volatile read
1121   // we simply need
1122   //
1123   //   ldar<x>
1124   //
1125   // and for a volatile write we need
1126   //
1127   //   stlr<x>
1128   //
1129   // Alternatively, we can implement them by pairing a normal
1130   // load/store with a memory barrier. For a volatile read we need
1131   //
1132   //   ldr<x>
1133   //   dmb ishld
1134   //
1135   // for a volatile write
1136   //
1137   //   dmb ish
1138   //   str<x>
1139   //   dmb ish
1140   //
1141   // We can also use ldaxr and stlxr to implement compare and swap CAS
1142   // sequences. These are normally translated to an instruction
1143   // sequence like the following
1144   //
1145   //   dmb      ish
1146   // retry:
1147   //   ldxr<x>   rval raddr
1148   //   cmp       rval rold
1149   //   b.ne done
1150   //   stlxr<x>  rval, rnew, rold
1151   //   cbnz      rval retry
1152   // done:
1153   //   cset      r0, eq
1154   //   dmb ishld
1155   //
1156   // Note that the exclusive store is already using an stlxr
1157   // instruction. That is required to ensure visibility to other
1158   // threads of the exclusive write (assuming it succeeds) before that
1159   // of any subsequent writes.
1160   //
1161   // The following instruction sequence is an improvement on the above
1162   //
1163   // retry:
1164   //   ldaxr<x>  rval raddr
1165   //   cmp       rval rold
1166   //   b.ne done
1167   //   stlxr<x>  rval, rnew, rold
1168   //   cbnz      rval retry
1169   // done:
1170   //   cset      r0, eq
1171   //
1172   // We don't need the leading dmb ish since the stlxr guarantees
1173   // visibility of prior writes in the case that the swap is
1174   // successful. Crucially we don't have to worry about the case where
1175   // the swap is not successful since no valid program should be
1176   // relying on visibility of prior changes by the attempting thread
1177   // in the case where the CAS fails.
1178   //
1179   // Similarly, we don't need the trailing dmb ishld if we substitute
1180   // an ldaxr instruction since that will provide all the guarantees we
1181   // require regarding observation of changes made by other threads
1182   // before any change to the CAS address observed by the load.
1183   //
1184   // In order to generate the desired instruction sequence we need to
1185   // be able to identify specific 'signature' ideal graph node
1186   // sequences which i) occur as a translation of a volatile reads or
1187   // writes or CAS operations and ii) do not occur through any other
1188   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1190   // sequences to the desired machine code sequences. Selection of the
1191   // alternative rules can be implemented by predicates which identify
1192   // the relevant node sequences.
1193   //
1194   // The ideal graph generator translates a volatile read to the node
1195   // sequence
1196   //
1197   //   LoadX[mo_acquire]
1198   //   MemBarAcquire
1199   //
1200   // As a special case when using the compressed oops optimization we
1201   // may also see this variant
1202   //
1203   //   LoadN[mo_acquire]
1204   //   DecodeN
1205   //   MemBarAcquire
1206   //
1207   // A volatile write is translated to the node sequence
1208   //
1209   //   MemBarRelease
1210   //   StoreX[mo_release] {CardMark}-optional
1211   //   MemBarVolatile
1212   //
1213   // n.b. the above node patterns are generated with a strict
1214   // 'signature' configuration of input and output dependencies (see
1215   // the predicates below for exact details). The card mark may be as
1216   // simple as a few extra nodes or, in a few GC configurations, may
1217   // include more complex control flow between the leading and
1218   // trailing memory barriers. However, whatever the card mark
1219   // configuration these signatures are unique to translated volatile
1220   // reads/stores -- they will not appear as a result of any other
1221   // bytecode translation or inlining nor as a consequence of
1222   // optimizing transforms.
1223   //
1224   // We also want to catch inlined unsafe volatile gets and puts and
1225   // be able to implement them using either ldar<x>/stlr<x> or some
1226   // combination of ldr<x>/stlr<x> and dmb instructions.
1227   //
1228   // Inlined unsafe volatiles puts manifest as a minor variant of the
1229   // normal volatile put node sequence containing an extra cpuorder
1230   // membar
1231   //
1232   //   MemBarRelease
1233   //   MemBarCPUOrder
1234   //   StoreX[mo_release] {CardMark}-optional
1235   //   MemBarCPUOrder
1236   //   MemBarVolatile
1237   //
1238   // n.b. as an aside, a cpuorder membar is not itself subject to
1239   // matching and translation by adlc rules.  However, the rule
1240   // predicates need to detect its presence in order to correctly
1241   // select the desired adlc rules.
1242   //
1243   // Inlined unsafe volatile gets manifest as a slightly different
1244   // node sequence to a normal volatile get because of the
1245   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1248   // present
1249   //
1250   //   MemBarCPUOrder
1251   //        ||       \\
1252   //   MemBarCPUOrder LoadX[mo_acquire]
1253   //        ||            |
1254   //        ||       {DecodeN} optional
1255   //        ||       /
1256   //     MemBarAcquire
1257   //
1258   // In this case the acquire membar does not directly depend on the
1259   // load. However, we can be sure that the load is generated from an
1260   // inlined unsafe volatile get if we see it dependent on this unique
1261   // sequence of membar nodes. Similarly, given an acquire membar we
1262   // can know that it was added because of an inlined unsafe volatile
1263   // get if it is fed and feeds a cpuorder membar and if its feed
1264   // membar also feeds an acquiring load.
1265   //
1266   // Finally an inlined (Unsafe) CAS operation is translated to the
1267   // following ideal graph
1268   //
1269   //   MemBarRelease
1270   //   MemBarCPUOrder
1271   //   CompareAndSwapX {CardMark}-optional
1272   //   MemBarCPUOrder
1273   //   MemBarAcquire
1274   //
1275   // So, where we can identify these volatile read and write
1276   // signatures we can choose to plant either of the above two code
1277   // sequences. For a volatile read we can simply plant a normal
1278   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1279   // also choose to inhibit translation of the MemBarAcquire and
1280   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1281   //
1282   // When we recognise a volatile store signature we can choose to
1283   // plant at a dmb ish as a translation for the MemBarRelease, a
1284   // normal str<x> and then a dmb ish for the MemBarVolatile.
1285   // Alternatively, we can inhibit translation of the MemBarRelease
1286   // and MemBarVolatile and instead plant a simple stlr<x>
1287   // instruction.
1288   //
1289   // when we recognise a CAS signature we can choose to plant a dmb
1290   // ish as a translation for the MemBarRelease, the conventional
1291   // macro-instruction sequence for the CompareAndSwap node (which
1292   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1293   // Alternatively, we can elide generation of the dmb instructions
1294   // and plant the alternative CompareAndSwap macro-instruction
1295   // sequence (which uses ldaxr<x>).
1296   //
1297   // Of course, the above only applies when we see these signature
1298   // configurations. We still want to plant dmb instructions in any
1299   // other cases where we may see a MemBarAcquire, MemBarRelease or
1300   // MemBarVolatile. For example, at the end of a constructor which
1301   // writes final/volatile fields we will see a MemBarRelease
1302   // instruction and this needs a 'dmb ish' lest we risk the
1303   // constructed object being visible without making the
1304   // final/volatile field writes visible.
1305   //
1306   // n.b. the translation rules below which rely on detection of the
1307   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1308   // If we see anything other than the signature configurations we
1309   // always just translate the loads and stores to ldr<x> and str<x>
1310   // and translate acquire, release and volatile membars to the
1311   // relevant dmb instructions.
1312   //
1313 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false.
  //
  // The first group of opcodes is treated as a CAS unconditionally;
  // the second group only when maybe_volatile is true.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These are only a CAS for the purpose of the volatile-access
      // predicates (maybe_volatile == true).
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
1359 
1360   // helper to determine the maximum number of Phi nodes we may need to
1361   // traverse when searching from a card mark membar for the merge mem
1362   // feeding a trailing membar or vice versa
1363 
// predicates controlling emit of ldr<x>/ldar<x> and associated dmb

// Returns true when this MemBarAcquire can be elided because the
// preceding load (or CAS) will itself be emitted with acquire
// semantics (ldar<x>/ldaxr<x>); see the volatile-translation notes
// above.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  if (mb->trailing_load()) {
    // trailing membar of a volatile load signature
    return true;
  }

  if (mb->trailing_load_store()) {
    // trailing membar of a CAS signature; elide only for CAS opcodes
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1389 
1390 bool needs_acquiring_load(const Node *n)
1391 {
1392   assert(n->is_Load(), "expecting a load");
1393   if (UseBarriersForVolatile) {
1394     // we use a normal load and a dmb
1395     return false;
1396   }
1397 
1398   LoadNode *ld = n->as_Load();
1399 
1400   return ld->is_acquire();
1401 }
1402 
1403 bool unnecessary_release(const Node *n)
1404 {
1405   assert((n->is_MemBar() &&
1406           n->Opcode() == Op_MemBarRelease),
1407          "expecting a release membar");
1408 
1409   if (UseBarriersForVolatile) {
1410     // we need to plant a dmb
1411     return false;
1412   }
1413 
1414   MemBarNode *barrier = n->as_MemBar();
1415   if (!barrier->leading()) {
1416     return false;
1417   } else {
1418     Node* trailing = barrier->trailing_membar();
1419     MemBarNode* trailing_mb = trailing->as_MemBar();
1420     assert(trailing_mb->trailing(), "Not a trailing membar?");
1421     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1422 
1423     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1424     if (mem->is_Store()) {
1425       assert(mem->as_Store()->is_release(), "");
1426       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1427       return true;
1428     } else {
1429       assert(mem->is_LoadStore(), "");
1430       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1431       return is_CAS(mem->Opcode(), true);
1432     }
1433   }
1434   return false;
1435 }
1436 
// Returns true when this trailing MemBarVolatile of a volatile store
// signature can be elided because the store will be emitted as
// stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // cross-check the leading/trailing membar pairing in debug builds
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1460 
1461 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1462 
1463 bool needs_releasing_store(const Node *n)
1464 {
1465   // assert n->is_Store();
1466   if (UseBarriersForVolatile) {
1467     // we use a normal store and dmb combination
1468     return false;
1469   }
1470 
1471   StoreNode *st = n->as_Store();
1472 
1473   return st->trailing_membar() != NULL;
1474 }
1475 
1476 // predicate controlling translation of CAS
1477 //
1478 // returns true if CAS needs to use an acquiring load otherwise false
1479 
1480 bool needs_acquiring_load_exclusive(const Node *n)
1481 {
1482   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1483   if (UseBarriersForVolatile) {
1484     return false;
1485   }
1486 
1487   LoadStoreNode* ldst = n->as_LoadStore();
1488   if (is_CAS(n->Opcode(), false)) {
1489     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1490   } else {
1491     return ldst->trailing_membar() != NULL;
1492   }
1493 
1494   // so we can just return true here
1495   return true;
1496 }
1497 
1498 #define __ _masm.
1499 
// forward declarations for helper functions to convert register
// indices to register objects
1502 
1503 // the ad file has to provide implementations of certain methods
1504 // expected by the generic code
1505 //
1506 // REQUIRED FUNCTIONALITY
1507 
1508 //=============================================================================
1509 
1510 // !!!!! Special hack to get all types of calls to specify the byte offset
1511 //       from the start of the call to the point where the return address
1512 //       will point.
1513 
1514 int MachCallStaticJavaNode::ret_addr_offset()
1515 {
1516   // call should be a simple bl
1517   int off = 4;
1518   return off;
1519 }
1520 
// A dynamic call emits four instructions (movz, movk, movk, bl) of
// 4 bytes each before the return address.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
1525 
1526 int MachCallRuntimeNode::ret_addr_offset() {
1527   // for generated stubs the call will be
1528   //   far_call(addr)
1529   // for real runtime callouts it will be six instructions
1530   // see aarch64_enc_java_to_runtime
1531   //   adr(rscratch2, retaddr)
1532   //   lea(rscratch1, RuntimeAddress(addr)
1533   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1534   //   blr(rscratch1)
1535   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1536   if (cb) {
1537     return MacroAssembler::far_branch_size();
1538   } else {
1539     return 6 * NativeInstruction::instruction_size;
1540   }
1541 }
1542 
1543 // Indicate if the safepoint node needs the polling page as an input
1544 
1545 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1547 // instruction itself. so we cannot plant a mov of the safepoint poll
1548 // address followed by a load. setting this to true means the mov is
1549 // scheduled as a prior instruction. that's better for scheduling
1550 // anyway.
1551 
// True so the mov of the polling page address is scheduled as a prior
// instruction, keeping the oop data aligned with the load itself (see
// the comment above).
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1556 
1557 //=============================================================================
1558 
#ifndef PRODUCT
// Debug listing for the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk #0 to trap into the debugger.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1573 
1574 //=============================================================================
1575 
#ifndef PRODUCT
  // Debug listing for the nop padding node.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions as padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Each nop occupies one machine instruction.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1592 
1593 //=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

// The constant table is addressed absolutely, so no base offset is
// required.
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never reached because requires_postalloc_expand() returns false
  ShouldNotReachHere();
}

// With absolute addressing the base node emits no code at all.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1618 
#ifndef PRODUCT
// Debug listing mirroring the code produced by MachPrologNode::emit
// below: stack bang note, frame push and the optional nmethod entry
// barrier pseudo-sequence.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: drop sp by an immediate, then store rfp/lr
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push lr/rfp first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
1654 
// Emit the method prolog: patchable nop, optional clinit barrier,
// stack bang, frame build, optional nmethod entry barrier and
// constant-table base offset setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // clinit_barrier branches to L_skip_barrier once the holder class
    // is safe to enter; otherwise control falls through to the
    // wrong-method stub
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    // plant the nmethod entry barrier for the current GC's barrier set
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1702 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// No relocatable values are embedded in the prolog itself.
int MachPrologNode::reloc() const
{
  return 0;
}
1713 
1714 //=============================================================================
1715 
#ifndef PRODUCT
// Debug listing mirroring MachEpilogNode::emit below: frame pop and
// optional return polling-page touch.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore sp via rscratch1 before popping lr/rfp
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1741 
// Emit the method epilog: pop the frame, optional reserved-stack
// check, and a return safepoint poll for method compilations.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
1757 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// The epilog uses the default pipeline class.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1771 
1772 //=============================================================================
1773 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an OptoReg to its spill-copy register class by comparing against
// the cumulative slot counts of the int and float register files.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)
  int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);

  if (reg < slots_of_int_registers) {
    return rc_int;
  }

  // we have 32 float register * 4 halves
  if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1802 
1803 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1804   Compile* C = ra_->C;
1805 
1806   // Get registers to move.
1807   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1808   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1809   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1810   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1811 
1812   enum RC src_hi_rc = rc_class(src_hi);
1813   enum RC src_lo_rc = rc_class(src_lo);
1814   enum RC dst_hi_rc = rc_class(dst_hi);
1815   enum RC dst_lo_rc = rc_class(dst_lo);
1816 
1817   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1818 
1819   if (src_hi != OptoReg::Bad) {
1820     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1821            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1822            "expected aligned-adjacent pairs");
1823   }
1824 
1825   if (src_lo == dst_lo && src_hi == dst_hi) {
1826     return 0;            // Self copy, no move.
1827   }
1828 
1829   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1830               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1831   int src_offset = ra_->reg2offset(src_lo);
1832   int dst_offset = ra_->reg2offset(dst_lo);
1833 
1834   if (bottom_type()->isa_vect() != NULL) {
1835     uint ireg = ideal_reg();
1836     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1837     if (cbuf) {
1838       C2_MacroAssembler _masm(cbuf);
1839       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1840       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1841         // stack->stack
1842         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1843         if (ireg == Op_VecD) {
1844           __ unspill(rscratch1, true, src_offset);
1845           __ spill(rscratch1, true, dst_offset);
1846         } else {
1847           __ spill_copy128(src_offset, dst_offset);
1848         }
1849       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1850         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1851                ireg == Op_VecD ? __ T8B : __ T16B,
1852                as_FloatRegister(Matcher::_regEncode[src_lo]));
1853       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1854         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1855                        ireg == Op_VecD ? __ D : __ Q,
1856                        ra_->reg2offset(dst_lo));
1857       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1858         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1859                        ireg == Op_VecD ? __ D : __ Q,
1860                        ra_->reg2offset(src_lo));
1861       } else {
1862         ShouldNotReachHere();
1863       }
1864     }
1865   } else if (cbuf) {
1866     C2_MacroAssembler _masm(cbuf);
1867     switch (src_lo_rc) {
1868     case rc_int:
1869       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1870         if (is64) {
1871             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1872                    as_Register(Matcher::_regEncode[src_lo]));
1873         } else {
1874             C2_MacroAssembler _masm(cbuf);
1875             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1876                     as_Register(Matcher::_regEncode[src_lo]));
1877         }
1878       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1879         if (is64) {
1880             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1881                      as_Register(Matcher::_regEncode[src_lo]));
1882         } else {
1883             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1884                      as_Register(Matcher::_regEncode[src_lo]));
1885         }
1886       } else {                    // gpr --> stack spill
1887         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1888         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1889       }
1890       break;
1891     case rc_float:
1892       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1893         if (is64) {
1894             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1895                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1896         } else {
1897             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1898                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1899         }
1900       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1901           if (cbuf) {
1902             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1903                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1904         } else {
1905             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1906                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1907         }
1908       } else {                    // fpr --> stack spill
1909         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1910         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1911                  is64 ? __ D : __ S, dst_offset);
1912       }
1913       break;
1914     case rc_stack:
1915       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1916         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1917       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1918         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1919                    is64 ? __ D : __ S, src_offset);
1920       } else {                    // stack --> stack copy
1921         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1922         __ unspill(rscratch1, is64, src_offset);
1923         __ spill(rscratch1, is64, dst_offset);
1924       }
1925       break;
1926     default:
1927       assert(false, "bad rc_class for spill");
1928       ShouldNotReachHere();
1929     }
1930   }
1931 
1932   if (st) {
1933     st->print("spill ");
1934     if (src_lo_rc == rc_stack) {
1935       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1936     } else {
1937       st->print("%s -> ", Matcher::regName[src_lo]);
1938     }
1939     if (dst_lo_rc == rc_stack) {
1940       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1941     } else {
1942       st->print("%s", Matcher::regName[dst_lo]);
1943     }
1944     if (bottom_type()->isa_vect() != NULL) {
1945       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1946     } else {
1947       st->print("\t# spill size = %d", is64 ? 64:32);
1948     }
1949   }
1950 
1951   return 0;
1952 
1953 }
1954 
1955 #ifndef PRODUCT
1956 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1957   if (!ra_)
1958     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1959   else
1960     implementation(NULL, ra_, false, st);
1961 }
1962 #endif
1963 
// Emit the spill copy into the code buffer (no printing).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1967 
// Spill copies are variable length; determine the size by emitting.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1971 
1972 //=============================================================================
1973 
1974 #ifndef PRODUCT
1975 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1976   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1977   int reg = ra_->get_reg_first(this);
1978   st->print("add %s, rsp, #%d]\t# box lock",
1979             Matcher::regName[reg], offset);
1980 }
1981 #endif
1982 
// Materialize the address of this lock's stack slot into the result register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // A frame-local offset is expected to always fit an add immediate;
  // size() below assumes a single 4-byte instruction.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
1995 
// Fixed size: emit() produces exactly one 4-byte add instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
2000 
2001 //=============================================================================
2002 
2003 #ifndef PRODUCT
2004 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2005 {
2006   st->print_cr("# MachUEPNode");
2007   if (UseCompressedClassPointers) {
2008     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2009     if (CompressedKlassPointers::shift() != 0) {
2010       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2011     }
2012   } else {
2013    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2014   }
2015   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2016   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2017 }
2018 #endif
2019 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  // Compare the receiver's klass against the inline-cache expectation.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Mismatch: go resolve/patch via the IC miss stub.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2033 
// Variable size (cmp_klass/far_jump length depends on VM flags); compute it.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
2038 
2039 // REQUIRED EMIT CODE
2040 
2041 //=============================================================================
2042 
2043 // Emit exception handler code.
// Emit exception handler code.  Returns the handler's offset within the
// stub section, or 0 when the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2062 
2063 // Emit deopt handler code.
// Emit deopt handler code.  Returns the handler's offset within the stub
// section, or 0 when the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the address of this handler before jumping to the unpacker,
  // so deoptimization knows where the deopt request came from.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2083 
2084 // REQUIRED MATCHER CODE
2085 
2086 //=============================================================================
2087 
2088 const bool Matcher::match_rule_supported(int opcode) {
2089   if (!has_match_rule(opcode))
2090     return false;
2091 
2092   bool ret_value = true;
2093   switch (opcode) {
2094     case Op_CacheWB:
2095     case Op_CacheWBPreSync:
2096     case Op_CacheWBPostSync:
2097       if (!VM_Version::supports_data_cache_line_flush()) {
2098         ret_value = false;
2099       }
2100       break;
2101   }
2102 
2103   return ret_value; // Per default match rules are supported.
2104 }
2105 
2106 // Identify extra cases that we might want to provide match rules for vector nodes and
2107 // other intrinsics guarded with vector length (vlen) and element type (bt).
2108 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
2109   if (!match_rule_supported(opcode)) {
2110     return false;
2111   }
2112 
2113   // Special cases which require vector length
2114   switch (opcode) {
2115     case Op_MulAddVS2VI: {
2116       if (vlen != 4) {
2117         return false;
2118       }
2119       break;
2120     }
2121   }
2122 
2123   return true; // Per default match rules are supported.
2124 }
2125 
// No predicated (masked) vector support in this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2129 
// Use the shared default for float register pressure; no adjustment needed.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2133 
// x87-style FPU stack offsets do not exist on AArch64; must not be called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2139 
2140 // Is this branch offset short enough that a short branch can be used?
2141 //
2142 // NOTE: If the platform does not provide any short branch variants, then
2143 //       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // +/-32KB fits the most restrictive short branch forms used here
  // (presumably tbz/tbnz's 14-bit scaled immediate — confirm against
  // the instruct definitions that use short branches).

  return (-32768 <= offset && offset < 32768);
}
2149 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2155 
// true just means we have fast l2f conversion
// (scvtf handles long -> float directly on AArch64).
const bool Matcher::convL2FSupported(void) {
  return true;
}
2160 
2161 // Vector width in bytes.
2162 const int Matcher::vector_width_in_bytes(BasicType bt) {
2163   int size = MIN2(16,(int)MaxVectorSize);
2164   // Minimum 2 values in vector
2165   if (size < 2*type2aelembytes(bt)) size = 0;
2166   // But never < 4
2167   if (size < 4) size = 0;
2168   return size;
2169 }
2170 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = vector width / element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2175 const int Matcher::min_vector_size(const BasicType bt) {
2176 //  For the moment limit the vector size to 8 bytes
2177     int size = 8 / type2aelembytes(bt);
2178     if (size < 2) size = 2;
2179     return size;
2180 }
2181 
2182 // Vector ideal reg.
2183 const uint Matcher::vector_ideal_reg(int len) {
2184   switch(len) {
2185     case  8: return Op_VecD;
2186     case 16: return Op_VecX;
2187   }
2188   ShouldNotReachHere();
2189   return 0;
2190 }
2191 
2192 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2193   switch(size) {
2194     case  8: return Op_VecD;
2195     case 16: return Op_VecX;
2196   }
2197   ShouldNotReachHere();
2198   return 0;
2199 }
2200 
// AES support not yet implemented
// (so there is no need to pass the original key to the stubs).
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2205 
// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}

// false => size gets scaled to BytesPerLong, ok.
// (ClearArray count is expressed in long words, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
2213 
// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  // (csel works on 64-bit operands directly).
  return 0;
}
2219 
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  // (fcsel works on FP registers directly).
  return 0;
}
2224 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low 5 (32-bit) / 6 (64-bit)
// bits of the count, so no explicit masking is required.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
2234 
// Must never be called: supports_generic_vector_operands is false above.
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}
2239 
// Must never be called: supports_generic_vector_operands is false above.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2244 
// Must never be called: supports_generic_vector_operands is false above.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2249 
2250 // This affects two different things:
2251 //  - how Decode nodes are matched
2252 //  - how ImplicitNullCheck opportunities are recognized
2253 // If true, the matcher will try to remove all Decodes and match them
2254 // (as operands) into nodes. NullChecks are not prepared to deal with
2255 // Decodes by final_graph_reshaping().
2256 // If false, final_graph_reshaping() forces the decode behind the Cmp
2257 // for a NullCheck. The matcher matches the Decode node into a register.
2258 // Implicit_null_check optimization moves the Decode along with the
2259 // memory operation back up before the NullCheck.
// Only fold DecodeN into addressing when no shift is needed (zero-based,
// unscaled compressed oops).
bool Matcher::narrow_oop_use_complex_address() {
  return CompressedOops::shift() == 0;
}
2263 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2269 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}
2274 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
2279 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// AArch64 handles misaligned double accesses directly.
const bool Matcher::misaligned_doubles_ok = true;
2292 
// Not used on aarch64 (comment previously said "No-op on amd64", copied
// from the x86 port); must never be called here.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2297 
// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
// AArch64 FP arithmetic conforms without extra rounding steps.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2310 
2311 // Return whether or not this register is ever used as an argument.
2312 // This function is used on startup to build the trampoline stubs in
2313 // generateOptoStub.  Registers not mentioned will be killed by the VM
2314 // call in the trampoline, and arguments in those registers not be
2315 // available to the callee.
// A register can be a Java argument iff it is one of the first eight
// integer (r0-r7) or FP/SIMD (v0-v7) argument registers, either half.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
2336 
// Any Java argument register may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2341 
// No hand-written assembly path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2345 
// Register for DIVI projection of divmodI.
// Fused divmod nodes are not used on aarch64, so this must not be called.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2350 
// Register for MODI projection of divmodI.
// Fused divmod nodes are not used on aarch64, so this must not be called.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2356 
// Register for DIVL projection of divmodL.
// Fused divmod nodes are not used on aarch64, so this must not be called.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2362 
// Register for MODL projection of divmodL.
// Fused divmod nodes are not used on aarch64, so this must not be called.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2368 
// SP is preserved across method-handle invokes in the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2372 
2373 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2374   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2375     Node* u = addp->fast_out(i);
2376     if (u->is_Mem()) {
2377       int opsize = u->as_Mem()->memory_size();
2378       assert(opsize > 0, "unexpected memory operand size");
2379       if (u->as_Mem()->memory_size() != (1<<shift)) {
2380         return false;
2381       }
2382     }
2383   }
2384   return true;
2385 }
2386 
// ConvI2L nodes need no explicit type attached for matching on this port.
const bool Matcher::convi2l_type_required = false;
2388 
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone constant vector shift counts so they stay adjacent to their use.
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
2397 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Returns true when the AddP and its shift/extend inputs were pushed as
// parts of an address expression (so they can be folded into a single
// scaled/extended addressing mode); false to match them as normal values.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: base + (index << con), only if every memory use matches the
  // scale (see size_fits_all_mem_uses above).
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // Fold an inner ConvI2L as a sign-extending (sxtw) index as well.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: base + ConvI2L(index), folded as a sign-extended index.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2438 
// Intentionally empty: no platform-specific AddP reshaping on aarch64.
void Compile::reshape_address(AddPNode* addp) {
}
2441 
2442 
// Emit a volatile (acquire/release) access via INSN.  Volatile accesses
// only support a plain base-register address, so any index, scale or
// displacement is rejected; SCRATCH is accepted but unused.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2451 
2452 
// Build an Address from the decomposed memory operand of an AD
// instruction, choosing sxtw (sign-extend) vs lsl index extension
// based on the operand's opcode.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 encodes "no index register": base + displacement form.
    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
2478 
2479 
// Member-function-pointer signatures for the assembler entry points
// passed to the loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2485 
2486   // Used for all non-volatile memory accesses.  The use of
2487   // $mem->opcode() to discover whether this pattern uses sign-extended
2488   // offsets is something of a kludge.
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Integer-register variant: emits 'insn' on reg with the operand's
  // address, legitimizing base+offset forms whose displacement does not
  // fit the immediate field (via rscratch1).
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
2507 
  // Float-register variant of loadStore: same opcode-driven choice of
  // sxtw vs lsl index extension as mem2address, with out-of-range
  // base+offset displacements legitimized through rscratch1.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2538 
  // Vector-register variant of loadStore: either base+disp or
  // base+index<<size addressing; no displacement legitimization here.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2550 
2551 %}
2552 
2553 
2554 
2555 //----------ENCODING BLOCK-----------------------------------------------------
2556 // This block specifies the encoding classes used by the compiler to
2557 // output byte streams.  Encoding classes are parameterized macros
2558 // used by Machine Instruction Nodes in order to generate the bit
2559 // encoding of the instruction.  Operands specify their base encoding
2560 // interface with the interface keyword.  There are currently
2561 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2562 // COND_INTER.  REG_INTER causes an operand to generate a function
2563 // which returns its register number when queried.  CONST_INTER causes
2564 // an operand to generate a function which returns the value of the
2565 // constant when queried.  MEMORY_INTER causes an operand to generate
2566 // four functions which return the Base Register, the Index Register,
2567 // the Scale Value, and the Offset Value of the operand when queried.
2568 // COND_INTER causes an operand to generate six functions which return
2569 // the encoding code (ie - encoding bits for the instruction)
2570 // associated with each basic boolean condition for a conditional
2571 // instruction.
2572 //
2573 // Instructions specify two basic values for encoding.  Again, a
2574 // function is available to check if the constant displacement is an
2575 // oop. They use the ins_encode keyword to specify their encoding
2576 // classes (which must be a sequence of enc_class names, and their
2577 // parameters, specified in the encoding block), and they use the
2578 // opcode keyword to specify, in order, their primary, secondary, and
2579 // tertiary opcode.  Only the opcode sections which a particular
2580 // instruction needs for encoding need to be specified.
2581 encode %{
2582   // Build emit functions for each basic byte or larger field in the
2583   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2584   // from C++ code in the enc_class source block.  Emit functions will
2585   // live in the main source block for now.  In future, we can
2586   // generalize this by adding a syntax that specifies the sizes of
2587   // fields in an order, so that the adlc can build the emit functions
2588   // automagically
2589 
2590   // catch all for unimplemented encodings
  // Placeholder encoding: emits a call to MacroAssembler::unimplemented
  // with the message "C2 catch all", halting the VM if ever executed.
  // Used for instruct definitions whose real encoding is not written yet.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2595 
2596   // BEGIN Non-volatile memory access
2597 
  // Scalar load encodings.  Each class forwards to loadStore(), which
  // selects the addressing mode from the memory operand's
  // base/index/scale/disp fields; the trailing integer argument is the
  // access size in bytes and matches the memory operand kind
  // (memory1/2/4/8).

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}
2709 
  // Scalar store encodings.  The *0 variants store the zero register
  // directly; aarch64_enc_str special-cases a store of sp (which cannot
  // be encoded directly on AArch64), and the imm variants materialize a
  // narrow oop/klass constant in rscratch2 before storing it.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // NOTE(review): operand is declared memory1 but the emitted store is
  // 4 bytes wide -- verify against ad_encode.m4 before relying on it.
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Byte store of zero preceded by a StoreStore barrier.
  // NOTE(review): operand is declared memory4 but the store is 1 byte --
  // verify against ad_encode.m4 before relying on it.
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}
2832 
2833   // END Non-volatile memory access
2834 
2835   // Vector loads and stores
2836   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
2837     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2838     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
2839        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2840   %}
2841 
2842   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
2843     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2844     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
2845        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2846   %}
2847 
2848   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
2849     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2850     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
2851        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2852   %}
2853 
2854   enc_class aarch64_enc_strvS(vecD src, memory mem) %{
2855     FloatRegister src_reg = as_FloatRegister($src$$reg);
2856     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
2857        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2858   %}
2859 
2860   enc_class aarch64_enc_strvD(vecD src, memory mem) %{
2861     FloatRegister src_reg = as_FloatRegister($src$$reg);
2862     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
2863        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2864   %}
2865 
2866   enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
2867     FloatRegister src_reg = as_FloatRegister($src$$reg);
2868     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
2869        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2870   %}
2871 
2872   // volatile loads and stores
2873 
  // Volatile access encodings.  MOV_VOLATILE is a helper macro defined
  // earlier in this file; it is passed the access's base/index/scale/disp,
  // a scratch register (rscratch1) for address arithmetic, and the
  // acquire/release instruction mnemonic to emit -- verify details
  // against the macro's definition.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Signed sub-word volatile loads: emit the zero-extending ldarb/ldarh
  // and then sign-extend the result explicitly.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP volatile loads: load-acquire into rscratch1, then fmov into the
  // destination FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // FP volatile stores: fmov the value into rscratch2, then
  // store-release it.  The inner scope limits the temporary _masm.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2998 
2999   // synchronized read/update encodings
3000 
3001   enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
3002     C2_MacroAssembler _masm(&cbuf);
3003     Register dst_reg = as_Register($dst$$reg);
3004     Register base = as_Register($mem$$base);
3005     int index = $mem$$index;
3006     int scale = $mem$$scale;
3007     int disp = $mem$$disp;
3008     if (index == -1) {
3009        if (disp != 0) {
3010         __ lea(rscratch1, Address(base, disp));
3011         __ ldaxr(dst_reg, rscratch1);
3012       } else {
3013         // TODO
3014         // should we ever get anything other than this case?
3015         __ ldaxr(dst_reg, base);
3016       }
3017     } else {
3018       Register index_reg = as_Register(index);
3019       if (disp == 0) {
3020         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
3021         __ ldaxr(dst_reg, rscratch1);
3022       } else {
3023         __ lea(rscratch1, Address(base, disp));
3024         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
3025         __ ldaxr(dst_reg, rscratch1);
3026       }
3027     }
3028   %}
3029 
3030   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
3031     C2_MacroAssembler _masm(&cbuf);
3032     Register src_reg = as_Register($src$$reg);
3033     Register base = as_Register($mem$$base);
3034     int index = $mem$$index;
3035     int scale = $mem$$scale;
3036     int disp = $mem$$disp;
3037     if (index == -1) {
3038        if (disp != 0) {
3039         __ lea(rscratch2, Address(base, disp));
3040         __ stlxr(rscratch1, src_reg, rscratch2);
3041       } else {
3042         // TODO
3043         // should we ever get anything other than this case?
3044         __ stlxr(rscratch1, src_reg, base);
3045       }
3046     } else {
3047       Register index_reg = as_Register(index);
3048       if (disp == 0) {
3049         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
3050         __ stlxr(rscratch1, src_reg, rscratch2);
3051       } else {
3052         __ lea(rscratch2, Address(base, disp));
3053         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
3054         __ stlxr(rscratch1, src_reg, rscratch2);
3055       }
3056     }
3057     __ cmpw(rscratch1, zr);
3058   %}
3059 
  // Compare-and-swap encodings (release ordering, no acquire).  The
  // memory operand must be a bare [base] address -- the guarantee
  // rejects any index or displacement.  cmpxchg() is expected to leave
  // the condition flags reflecting success (see aarch64_enc_cset_eq).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit variant.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit variant.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit variant.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3091 
3092 
3093   // The only difference between aarch64_enc_cmpxchg and
3094   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
3095   // CompareAndSwap sequence to serve as a barrier on acquiring a
3096   // lock.
  // Acquiring variants: identical to the group above except
  // acquire == true, giving full acquire+release semantics.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3128 
3129   // auxiliary used for CompareAndSwapX to set result register
3130   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
3131     C2_MacroAssembler _masm(&cbuf);
3132     Register res_reg = as_Register($res$$reg);
3133     __ cset(res_reg, Assembler::EQ);
3134   %}
3135 
3136   // prefetch encodings
3137 
3138   enc_class aarch64_enc_prefetchw(memory mem) %{
3139     C2_MacroAssembler _masm(&cbuf);
3140     Register base = as_Register($mem$$base);
3141     int index = $mem$$index;
3142     int scale = $mem$$scale;
3143     int disp = $mem$$disp;
3144     if (index == -1) {
3145       __ prfm(Address(base, disp), PSTL1KEEP);
3146     } else {
3147       Register index_reg = as_Register(index);
3148       if (disp == 0) {
3149         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
3150       } else {
3151         __ lea(rscratch1, Address(base, disp));
3152         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
3153       }
3154     }
3155   %}
3156 
  // mov encodings
3158 
3159   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
3160     C2_MacroAssembler _masm(&cbuf);
3161     u_int32_t con = (u_int32_t)$src$$constant;
3162     Register dst_reg = as_Register($dst$$reg);
3163     if (con == 0) {
3164       __ movw(dst_reg, zr);
3165     } else {
3166       __ movw(dst_reg, con);
3167     }
3168   %}
3169 
3170   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
3171     C2_MacroAssembler _masm(&cbuf);
3172     Register dst_reg = as_Register($dst$$reg);
3173     u_int64_t con = (u_int64_t)$src$$constant;
3174     if (con == 0) {
3175       __ mov(dst_reg, zr);
3176     } else {
3177       __ mov(dst_reg, con);
3178     }
3179   %}
3180 
  // Move a pointer constant into a register, choosing the form by the
  // constant's relocation type.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    // NULL and (address)1 must never reach this encoding (they are
    // matched by other operands/encodings).
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Constants below the first page are emitted as plain
        // immediates; anything else uses adrp+add (PC-relative page
        // address plus in-page offset).
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3205 
3206   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3207     C2_MacroAssembler _masm(&cbuf);
3208     Register dst_reg = as_Register($dst$$reg);
3209     __ mov(dst_reg, zr);
3210   %}
3211 
3212   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3213     C2_MacroAssembler _masm(&cbuf);
3214     Register dst_reg = as_Register($dst$$reg);
3215     __ mov(dst_reg, (u_int64_t)1);
3216   %}
3217 
  // Load the card-table byte map base address into dst via the
  // load_byte_map_base helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
3222 
3223   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
3224     C2_MacroAssembler _masm(&cbuf);
3225     Register dst_reg = as_Register($dst$$reg);
3226     address con = (address)$src$$constant;
3227     if (con == NULL) {
3228       ShouldNotReachHere();
3229     } else {
3230       relocInfo::relocType rtype = $src->constant_reloc();
3231       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
3232       __ set_narrow_oop(dst_reg, (jobject)con);
3233     }
3234   %}
3235 
3236   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
3237     C2_MacroAssembler _masm(&cbuf);
3238     Register dst_reg = as_Register($dst$$reg);
3239     __ mov(dst_reg, zr);
3240   %}
3241 
3242   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
3243     C2_MacroAssembler _masm(&cbuf);
3244     Register dst_reg = as_Register($dst$$reg);
3245     address con = (address)$src$$constant;
3246     if (con == NULL) {
3247       ShouldNotReachHere();
3248     } else {
3249       relocInfo::relocType rtype = $src->constant_reloc();
3250       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
3251       __ set_narrow_klass(dst_reg, (Klass *)con);
3252     }
3253   %}
3254 
3255   // arithmetic encodings
3256 
3257   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
3258     C2_MacroAssembler _masm(&cbuf);
3259     Register dst_reg = as_Register($dst$$reg);
3260     Register src_reg = as_Register($src1$$reg);
3261     int32_t con = (int32_t)$src2$$constant;
3262     // add has primary == 0, subtract has primary == 1
3263     if ($primary) { con = -con; }
3264     if (con < 0) {
3265       __ subw(dst_reg, src_reg, -con);
3266     } else {
3267       __ addw(dst_reg, src_reg, con);
3268     }
3269   %}
3270 
3271   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
3272     C2_MacroAssembler _masm(&cbuf);
3273     Register dst_reg = as_Register($dst$$reg);
3274     Register src_reg = as_Register($src1$$reg);
3275     int32_t con = (int32_t)$src2$$constant;
3276     // add has primary == 0, subtract has primary == 1
3277     if ($primary) { con = -con; }
3278     if (con < 0) {
3279       __ sub(dst_reg, src_reg, -con);
3280     } else {
3281       __ add(dst_reg, src_reg, con);
3282     }
3283   %}
3284 
3285   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3286     C2_MacroAssembler _masm(&cbuf);
3287    Register dst_reg = as_Register($dst$$reg);
3288    Register src1_reg = as_Register($src1$$reg);
3289    Register src2_reg = as_Register($src2$$reg);
3290     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3291   %}
3292 
3293   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3294     C2_MacroAssembler _masm(&cbuf);
3295    Register dst_reg = as_Register($dst$$reg);
3296    Register src1_reg = as_Register($src1$$reg);
3297    Register src2_reg = as_Register($src2$$reg);
3298     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3299   %}
3300 
3301   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3302     C2_MacroAssembler _masm(&cbuf);
3303    Register dst_reg = as_Register($dst$$reg);
3304    Register src1_reg = as_Register($src1$$reg);
3305    Register src2_reg = as_Register($src2$$reg);
3306     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3307   %}
3308 
3309   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3310     C2_MacroAssembler _masm(&cbuf);
3311    Register dst_reg = as_Register($dst$$reg);
3312    Register src1_reg = as_Register($src1$$reg);
3313    Register src2_reg = as_Register($src2$$reg);
3314     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3315   %}
3316 
3317   // compare instruction encodings
3318 
3319   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3320     C2_MacroAssembler _masm(&cbuf);
3321     Register reg1 = as_Register($src1$$reg);
3322     Register reg2 = as_Register($src2$$reg);
3323     __ cmpw(reg1, reg2);
3324   %}
3325 
3326   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3327     C2_MacroAssembler _masm(&cbuf);
3328     Register reg = as_Register($src1$$reg);
3329     int32_t val = $src2$$constant;
3330     if (val >= 0) {
3331       __ subsw(zr, reg, val);
3332     } else {
3333       __ addsw(zr, reg, -val);
3334     }
3335   %}
3336 
3337   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3338     C2_MacroAssembler _masm(&cbuf);
3339     Register reg1 = as_Register($src1$$reg);
3340     u_int32_t val = (u_int32_t)$src2$$constant;
3341     __ movw(rscratch1, val);
3342     __ cmpw(reg1, rscratch1);
3343   %}
3344 
3345   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3346     C2_MacroAssembler _masm(&cbuf);
3347     Register reg1 = as_Register($src1$$reg);
3348     Register reg2 = as_Register($src2$$reg);
3349     __ cmp(reg1, reg2);
3350   %}
3351 
  // 64-bit compare against an add/sub-encodable immediate.  cmp is subs
  // with the zero register as destination.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // Negative immediates become an add of the absolute value.
      __ adds(zr, reg, -val);
    } else {
      // val == -val only for Long.MIN_VALUE, which cannot be negated;
      // materialize it in rscratch1 and compare register-register.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
3366 
3367   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3368     C2_MacroAssembler _masm(&cbuf);
3369     Register reg1 = as_Register($src1$$reg);
3370     u_int64_t val = (u_int64_t)$src2$$constant;
3371     __ mov(rscratch1, val);
3372     __ cmp(reg1, rscratch1);
3373   %}
3374 
3375   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3376     C2_MacroAssembler _masm(&cbuf);
3377     Register reg1 = as_Register($src1$$reg);
3378     Register reg2 = as_Register($src2$$reg);
3379     __ cmp(reg1, reg2);
3380   %}
3381 
3382   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3383     C2_MacroAssembler _masm(&cbuf);
3384     Register reg1 = as_Register($src1$$reg);
3385     Register reg2 = as_Register($src2$$reg);
3386     __ cmpw(reg1, reg2);
3387   %}
3388 
3389   enc_class aarch64_enc_testp(iRegP src) %{
3390     C2_MacroAssembler _masm(&cbuf);
3391     Register reg = as_Register($src$$reg);
3392     __ cmp(reg, zr);
3393   %}
3394 
3395   enc_class aarch64_enc_testn(iRegN src) %{
3396     C2_MacroAssembler _masm(&cbuf);
3397     Register reg = as_Register($src$$reg);
3398     __ cmpw(reg, zr);
3399   %}
3400 
3401   enc_class aarch64_enc_b(label lbl) %{
3402     C2_MacroAssembler _masm(&cbuf);
3403     Label *L = $lbl$$label;
3404     __ b(*L);
3405   %}
3406 
3407   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
3408     C2_MacroAssembler _masm(&cbuf);
3409     Label *L = $lbl$$label;
3410     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3411   %}
3412 
3413   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
3414     C2_MacroAssembler _masm(&cbuf);
3415     Label *L = $lbl$$label;
3416     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3417   %}
3418 
  // Slow-path subtype check (scan of the secondary supers array).
  // On a hit, control falls through with result_reg holding the success
  // value; on a miss, control reaches the 'miss' label.  $primary selects
  // a variant that clears result_reg on success.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     // NULL for the success label means fall through on success.
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3436 
  // Emit a direct Java static call, or a call to a runtime wrapper when
  // there is no resolved _method.  A trampoline is used so the target can
  // be anywhere in the code cache; failures to allocate code-cache space
  // bail out of the compile via record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      // Pick the relocation matching the call kind so the runtime can
      // later patch/resolve this call site.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      // Trampoline emission failed; abandon the compile rather than
      // emit broken code.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3463 
  // Emit a Java dynamic (inline-cache) call.  ic_call emits the IC
  // holder load plus the call; a NULL return means the code cache is
  // full, so abandon the compile.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3473 
  // Code emitted after a call returns.  Stack-depth verification is not
  // implemented on AArch64 yet, hence the unimplemented trap under the
  // debug flag.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3481 
  // Call from compiled Java code into the runtime / stub routines.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a (trampolined) direct call works.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // Pop the breadcrumb slots pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3508 
  // Jump (not call) to the shared rethrow stub.  The stub may be out of
  // simple branch range, hence far_jump.
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3513 
  // Method return: branch to the link register.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3518 
3519   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3520     C2_MacroAssembler _masm(&cbuf);
3521     Register target_reg = as_Register($jump_target$$reg);
3522     __ br(target_reg);
3523   %}
3524 
  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3534 
  // Fast-path monitor enter.  Attempts, in order: biased locking (if
  // enabled), stack-locking via CAS of the mark word, recursive
  // stack-lock detection, and finally CAS of an inflated monitor's owner
  // field.  On exit the condition flags report the outcome:
  // EQ = locked, NE = must take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object; we have now locked it and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If the masked difference is zero the mark points into our own
    // stack frame (a lock we already hold), so we can store 0 as the
    // displaced header in the box, which indicates a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result

    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3612 
  // Fast-path monitor exit, mirroring aarch64_enc_fast_lock: biased
  // unlock (if enabled), recursive stack-unlock, stack-lock release via
  // CAS, and inflated-monitor release.  On exit the condition flags
  // report the outcome: EQ = unlocked, NE = must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    // No waiters may be queued; otherwise take the slow path.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3673 
3674 %}
3675 
3676 //----------FRAME--------------------------------------------------------------
3677 // Definition of frame structure and management information.
3678 //
3679 //  S T A C K   L A Y O U T    Allocators stack-slot number
3680 //                             |   (to get allocators register number
3681 //  G  Owned by    |        |  v    add OptoReg::stack0())
3682 //  r   CALLER     |        |
3683 //  o     |        +--------+      pad to even-align allocators stack-slot
3684 //  w     V        |  pad0  |        numbers; owned by CALLER
3685 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3686 //  h     ^        |   in   |  5
3687 //        |        |  args  |  4   Holes in incoming args owned by SELF
3688 //  |     |        |        |  3
3689 //  |     |        +--------+
3690 //  V     |        | old out|      Empty on Intel, window on Sparc
3691 //        |    old |preserve|      Must be even aligned.
3692 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3693 //        |        |   in   |  3   area for Intel ret address
3694 //     Owned by    |preserve|      Empty on Sparc.
3695 //       SELF      +--------+
3696 //        |        |  pad2  |  2   pad to align old SP
3697 //        |        +--------+  1
3698 //        |        | locks  |  0
3699 //        |        +--------+----> OptoReg::stack0(), even aligned
3700 //        |        |  pad1  | 11   pad to align new SP
3701 //        |        +--------+
3702 //        |        |        | 10
3703 //        |        | spills |  9   spills
3704 //        V        |        |  8   (pad0 slot for callee)
3705 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3706 //        ^        |  out   |  7
3707 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3708 //     Owned by    +--------+
3709 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3710 //        |    new |preserve|      Must be even-aligned.
3711 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3712 //        |        |        |
3713 //
3714 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3715 //         known from SELF's arguments and the Java calling convention.
3716 //         Region 6-7 is determined per call site.
3717 // Note 2: If the calling convention leaves holes in the incoming argument
3718 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3720 //         incoming area, as the Java calling convention is completely under
3721 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3723 //         varargs C calling conventions.
3724 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3725 //         even aligned with pad0 as needed.
3726 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3727 //           (the latter is true on Intel but is it false on AArch64?)
3728 //         region 6-11 is even aligned; it may be padded out more so that
3729 //         the region from SP to FP meets the minimum stack alignment.
3730 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3731 //         alignment.  Region 11, pad1, may be dynamically extended so that
3732 //         SP meets the minimum alignment.
3733 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 here presumably names the stack-pointer register in
  // this file's register definitions -- confirm against the register block.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // Map an ideal register class to the concrete (lo, hi) register pair
    // used to return a value of that class.
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3837 
//----------ATTRIBUTES---------------------------------------------------------
// Default attribute values supplied to ADLC for every operand/instruction
// unless an individual definition overrides them.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3855 
3856 //----------OPERANDS-----------------------------------------------------------
3857 // Operand definitions must precede instruction definitions for correct parsing
3858 // in the ADLC because operands constitute user defined types which are used in
3859 // instruction definitions.
3860 
3861 //----------Simple Operands----------------------------------------------------
3862 
// Integer operands 32 bit
// 32 bit immediate
// No predicate: any ConI constant matches.
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3906 
// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any constant <= 4.
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The following operands each match one exact constant value; they are
// used by matcher rules that need that specific value (typically shift
// amounts or byte counts).

// The constant 31.
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 8.
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 16.
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 24.
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 32.
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 48.
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 56.
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 63.
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 64.
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 255 (a one-byte mask).
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 65535 (a two-byte mask).
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4037 
// The long constant 255 (a one-byte mask).
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The long constant 65535 (a two-byte mask).
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The long constant 0xFFFFFFFF (a four-byte mask).
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// A long of the form 2^k - 1 (a contiguous low-bit mask), non-zero and
// with the top two bits clear.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// An int of the form 2^k - 1 (a contiguous low-bit mask), non-zero and
// with the top two bits clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4091 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4145 
// Offset for scaled or unscaled immediate loads and stores
// The numbered variants correspond to the access size in bytes; the
// second argument to offset_ok_for_immed is log2 of that size.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (shift 0).
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access (shift 1).
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (shift 2).
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (shift 3).
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (shift 4).
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4206 
// Long-typed versions of the immIOffset* operands above; same scale
// shifts, but matching ConL instead of ConI.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// offset valid for a 1 byte access (scale shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// offset valid for a 2 byte access (scale shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// offset valid for a 4 byte access (scale shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// offset valid for an 8 byte access (scale shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// offset valid for a 16 byte access (scale shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4266 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  // note: the (unsigned long) cast sign-extends a negative int before the
  // 32-bit validity check -- see the TODO above
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4288 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4332 
// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  // matches exactly the byte offset of last_Java_pc within the thread's
  // JavaFrameAnchor -- see thread_anchor_pc below
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4375 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  // only matches when a card-table barrier set is active and the constant
  // is exactly the table's byte_map_base
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4446 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  // bit-pattern comparison so -0.0d does not match
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4467 
// Double Immediate: value representable as a packed (FMOV-encodable)
// immediate -- see Assembler::operand_valid_for_float_immediate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4477 
// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  // bit-pattern comparison so -0.0f does not match
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4497 
4498 //
4499 operand immFPacked()
4500 %{
4501   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
4502   match(ConF);
4503   op_cost(0);
4504   format %{ %}
4505   interface(CONST_INTER);
4506 %}
4507 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4538 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4560 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4572 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // op_cost(0) stated explicitly for consistency with the sibling
  // register operands (iRegINoSp, iRegPNoSp, iRegNNoSp)
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4582 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4615 
// Pointer register operands pinned to one specific general register,
// used where the calling convention or a stub requires a fixed register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4699 
// Long register operands pinned to one specific general register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4743 
// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4754 
// Integer register operands pinned to one specific general register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4799 
4800 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer 32 bit Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4849 
// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4860 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4904 
// Double register operands each pinned to one specific FP/SIMD register
// (V0 .. V31), used where a fixed vector register is required.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5192 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5232 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter Method Oop Register
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5274 
//----------Memory Operands----------------------------------------------------

// [reg] -- base register, no index, no offset
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (int index sign-extended to long) << scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + long index << scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (int index sign-extended to long)]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + long index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5348 
// [reg + int offset] -- the immIOffset* operand chosen fixes the access
// size the offset must be valid for (see immIOffset* above).
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5432 
// [reg + long offset] -- long-typed counterparts of the indOffI* family.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5516 
// Narrow-oop variants of the memory operands above: the base is a
// compressed pointer (DecodeN). All require CompressedOops::shift() == 0,
// i.e. the narrow value is usable as a raw address without scaling.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5621 
5622 
5623 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Address = thread register + fixed immediate offset (immL_pc_off) —
// presumably the last_Java_pc slot of the JavaFrameAnchor, matching the
// operand name; confirm against the immL_pc_off definition.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
5638 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// In each interface below, base(0x1e) encodes the stack pointer and the
// displacement is the slot's offset taken from the stack register operand.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // NOTE(review): only the pointer flavour carries op_cost(100); the
  // I/F/D/L flavours below have none — confirm this asymmetry is intended.
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a 32-bit int.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a single-precision float.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double-precision float.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a 64-bit long.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5713 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings (EQ=0x0,
// NE=0x1, LT=0xb, GE=0xa, LE=0xd, GT=0xc, VS=0x6, VC=0x7); note each
// pair of opposite conditions differs only in the low bit, so a
// condition is negated by flipping bit 0.

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// Same structure as cmpOp but with the unsigned condition codes:
// LO/CC=0x3, HS/CS=0x2, LS=0x9, HI=0x8.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted by predicate to eq/ne tests only.

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted by predicate to lt/ge tests only (sign-bit tests).

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted by predicate to eq/ne/lt/ge tests.

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5842 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // REG_INTER: presents the underlying long register as a plain
  // register operand; no code is emitted for the truncation.
  interface(REG_INTER)
%}

// Memory operand classes legal for vector loads/stores of 4, 8 and 16
// bytes; the immediate-offset forms are sized to the access width.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5859 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

// memory1/2/4/8 differ only in which immediate-offset operands they
// accept (indOffI1/L1 ... indOffI8/L8, sized to the access width).
// Note memory4 and memory8 also include the narrow-oop offset forms
// (indOffIN/indOffLN), which memory1 and memory2 do not.

opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5902 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names onto the generic stages declared in
// pipe_desc below: ISS=issue, EX1/EX2=execute, WR=write-back.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5912 
// Pipeline block: bundling attributes, functional-unit resources,
// stage description and pipeline classes for the instruction defs.
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5928 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 are the two issue slots; INS01 means "either slot".
// ALU0/ALU1 are the two integer ALUs; ALU means "either ALU".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5949 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP binary op, single precision: sources read in S1/S2, result in S5.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP binary op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> 32-bit integer register.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> 64-bit integer register.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 32-bit integer -> FP float.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 64-bit integer -> FP float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> 32-bit integer register.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> 64-bit integer register.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 32-bit integer -> FP double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 64-bit integer -> FP double.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision — issue slot 0 only (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision — issue slot 0 only (INS0).
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads flags and both sources
// in S1, shorter latency (result in S3).
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6155 
// Vector multiply, 64-bit (D) operands.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (X) operands — issue slot 0 only.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit; dst is also read (accumulator).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit; dst is also read (accumulator).
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer binary op (add/sub etc.), 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer binary op, 128-bit — issue slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit — issue slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit — issue slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit — issue slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP binary op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP binary op, 128-bit — issue slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit — issue slot 0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit — issue slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit — issue slot 0 only.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit — issue slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate integer register into 64-bit vector lanes.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate integer register into 128-bit vector lanes.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 64-bit vector lanes.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 128-bit vector lanes.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into 128-bit vector lanes.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move, 128-bit — issue slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit (8-byte memory operand).
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit (16-byte memory operand).
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit (8-byte memory operand).
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6430 
// Vector store, 128-bit (16-byte memory operand).
// Fix: the source is a 128-bit vector, so it must be vecX, matching
// vload_reg_mem128(vecX, vmem16); the original vecD was inconsistent
// with the 64-bit counterpart vstore_reg_mem64(vecD, vmem8).
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6439 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): dst is written in EX2 but the ALU is booked in EX1 —
  // inconsistent with the "result generated in EX2" comment above;
  // confirm whether ALU : EX2 was intended.
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}

//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6602 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6681 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// dst here is the address register, read at issue.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6749 
6750 //------- Store pipeline operations -----------------------
6751 
6752 // Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;  // branch unit resolves the target at EX1
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);  // condition flags consumed at EX1
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);  // register compared against zero, read at EX1
  INS01  : ISS;
  BRANCH : EX1;
%}
6778 
6779 //------- Synchronisation operations ----------------------
6780 
6781 // Any operation requiring serialization.
6782 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;  // must not be reordered with surrounding instructions
  fixed_latency(16);    // pessimistic fixed cost for ordering operations
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);  // pessimistic count for macro-expanded idioms
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6802 
6803 // Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);  // consumes no pipeline time; used for nops
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);  // calls are far more expensive than ordinary insns
%}
6837 
6838 // Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty; // nops consume no pipeline resources
%}
6842 
6843 %}
6844 //----------INSTRUCTIONS-------------------------------------------------------
6845 //
6846 // match      -- States which machine-independent subtree may be replaced
6847 //               by this instruction.
6848 // ins_cost   -- The estimated cost of this instruction is used by instruction
6849 //               selection to identify a minimum cost tree of machine
6850 //               instructions that matches a tree of machine-independent
6851 //               instructions.
6852 // format     -- A string providing the disassembly for this instruction.
6853 //               The value of an instruction's operand may be inserted
6854 //               by referring to it with a '$' prefix.
6855 // opcode     -- Three instruction opcodes may be provided.  These are referred
6856 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6858 //               indicate the type of machine instruction, while secondary
6859 //               and tertiary are often used for prefix options or addressing
6860 //               modes.
6861 // ins_encode -- A list of encode classes with parameters. The encode class
6862 //               name must have been defined in an 'enc_class' specification
6863 //               in the encode section of the architecture description.
6864 
6865 // ============================================================================
6866 // Memory (Load/Store) Instructions
6867 
6868 // Load Instructions
6869 
6870 // Load Byte (8 bit signed)
// Plain (non-acquiring) loads: the needs_acquiring_load predicate defers
// volatile/acquire forms to the _volatile rules later in this file.
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  // ConvI2L is folded into the sign-extending load; the predicate looks
  // through the conversion node at the underlying LoadB.
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  // (AndL (ConvI2L (LoadI)) 0xFFFFFFFF) is a zero-extending 32-bit load;
  // ldrw zero-extends into the 64-bit register for free.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7023 
7024 // Load Long (64 bit signed)
// Plain (non-acquiring) 64-bit load; volatile form handled separately.
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly note corrected: this is a 64-bit (long) load, not "# int".
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7037 
7038 // Load Range
// Array length loads are never volatile, so no acquiring predicate here.
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // barrier_data() == 0: only match when no GC load barrier is required.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  // FP loads use the generic memory pipe, not iload_reg_mem.
  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7134 
7135 
7136 // Load Int Constant
// Materialize a 32-bit integer constant via mov/movz/movn sequences.
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // May expand to up to a 4-instruction movz/movk sequence.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7191 
7192 // Load Pointer Constant One
7193 
// Materialize the pointer constant one (immP_1).
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed copy-paste from loadConP0: this loads the constant one, not NULL.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7205 
7206 // Load Byte Map Base Constant
7207 
// Materialize the card table byte map base address.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant

// Float constants expressible as an 8-bit fmov immediate (immFPacked)
// avoid a constant-table load entirely.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7305 
7306 // Load Double Constant
7307 
// Load a double constant from the constant table (non-packed immediates).
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // Fixed copy-paste from loadConF: this is a double constant.
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7322 
7323 // Store Instructions
7324 
7325 // Store CMS card-mark Immediate
// Card-mark store of zero; the StoreStore barrier has been elided
// (conditional card marking or a GC that does not need it).
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// Plain (non-releasing) stores: needs_releasing_store defers volatile
// forms to the _volatile rules later in this file.
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7368 
7369 
// Store immediate zero byte; aarch64_enc_strb0 stores the zero register.
instruct storeimmB0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format: the encoder emits strb of zr (see aarch64_enc_strb0,
  // also used by storeimmCM0), not "rscractch2".
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7382 
7383 // Store Char/Short
// Plain (non-releasing) halfword store.
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7437 
7438 // Store Long (64 bit signed)
// Plain (non-releasing) 64-bit store.
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Disassembly note corrected: 64-bit (long) store, not "# int".
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7451 
7452 // Store Long (64 bit signed)
// Store immediate zero long; the encoder stores the zero register.
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Disassembly note corrected: 64-bit (long) store, not "# int".
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7465 
7466 // Store Pointer
// Plain (non-releasing) pointer store.
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// When both heap and klass bases are zero, rheapbase holds zero and can be
// stored directly as the compressed null, saving a constant materialization.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(CompressedOops::base() == NULL &&
            CompressedKlassPointers::base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7567 
7568 // TODO
7569 // implement storeImmD0 and storeDImmPacked
7570 
7571 // prefetch instructions
7572 // Must be safe to execute with invalid address (cannot fault).
7573 
// Prefetch for allocation; must be safe with an invalid address.
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7584 
7585 //  ---------------- volatile loads and stores ----------------
7586 
7587 // Load Byte (8 bit signed)
// Acquiring (volatile) loads use load-acquire (ldar*) instructions and
// indirect addressing only; no predicate needed — the plain rules above
// exclude the acquiring case, so these are the fallback match.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7676 
7677 // Load Short/Char (16 bit signed) into long
// Acquiring load of a signed short, sign-extended into a long.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format to match the encoder: aarch64_enc_ldarsh emits the
  // signed ldarsh, not the unsigned ldarh.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7689 
7690 // Load Integer (32 bit signed)
// Acquiring 32-bit load.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  // ldarw zero-extends, so the AndL mask is free.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7715 
7716 // Load Long (64 bit signed)
// Acquiring 64-bit load.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Disassembly note corrected: 64-bit (long) load, not "# int".
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7728 
7729 // Load Pointer
// Acquiring pointer load; only when no GC load barrier is required.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7781 
7782 // Store Byte
// Releasing (volatile) stores use store-release (stlr*) instructions;
// the plain store rules above exclude the releasing case.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7821 
7822 // Store Long (64 bit signed)
// Releasing 64-bit store.
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Disassembly note corrected: 64-bit (long) store, not "# int".
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7834 
7835 // Store Pointer
// Releasing pointer store.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7889 
7890 //  ---------------- end of volatile loads and stores ----------------
7891 
// Cache-line writeback for the ManagementBuffer/Unsafe writeback intrinsics;
// only matched when the CPU supports data cache line flush.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The node is only generated with a plain base address (no index,
    // no displacement); assert that here.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
7932 
7933 // ============================================================================
7934 // BSWAP Instructions
7935 
// Byte-reverse a 32-bit value (rev32 semantics on a w-register).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    // sbfmw sign-extends the reversed low 16 bits back to 32 bits.
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7989 
7990 // ============================================================================
7991 // Zero Count Instructions
7992 
// Count leading zeros of an int (Integer.numberOfLeadingZeros) with CLZW.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a long (Long.numberOfLeadingZeros) with CLZ.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of an int (Integer.numberOfTrailingZeros):
// AArch64 has no CTZ, so bit-reverse with RBITW then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a long (Long.numberOfTrailingZeros):
// bit-reverse with RBIT then count leading zeros.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8044 
8045 //---------- Population Count Instructions -------------------------------------
8046 //
8047 
// Population count of an int (Integer.bitCount), guarded by
// -XX:+UsePopCountInstruction. AArch64 has no scalar popcount, so the
// value is moved to a SIMD register, CNT counts bits per byte (8B lanes),
// ADDV sums the lanes, and the result is moved back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src in place; the 32-bit move clears the
    // upper half of the register while leaving the int value unchanged.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of an int loaded directly from memory: LDRS loads the
// 32-bit value straight into the SIMD register (upper bits zeroed),
// avoiding the GPR round-trip of the register form above.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // Emit the load through the common loadStore helper so all of the
    // operand's addressing modes (base/index/scale/disp) are handled.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// Population count of a long: same CNT/ADDV sequence over all 8 bytes.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of a long loaded directly from memory via LDRD.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8134 
8135 // ============================================================================
8136 // MemBar Instruction
8137 
// LoadFence: orders prior loads before subsequent loads and stores
// (LoadLoad|LoadStore barrier).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire that the predicate has proven redundant (see
// unnecessary_acquire() for the exact conditions); emits only a
// block comment so the elision is visible in disassembly.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire: emitted as a LoadLoad|LoadStore barrier.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock: always elided on AArch64 — only a block comment is
// emitted. Presumably the lock-acquiring CAS supplies the needed ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores
// (LoadStore|StoreStore barrier).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease proven redundant by unnecessary_release(); comment only.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease: emitted as a LoadStore|StoreStore barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: orders stores against later stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided on AArch64 (cf. membar_acquire_lock).
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile proven redundant by unnecessary_volatile(); comment only.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile: the expensive StoreLoad barrier (full dmb ish).
// Cost is inflated so cheaper predicated/elided forms win when legal.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8285 
8286 // ============================================================================
8287 // Cast/Convert Instructions
8288 
// Reinterpret a long as a pointer: a plain register move, elided
// entirely when the allocator assigns dst and src the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long: a plain register move, elided
// when dst and src coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// Truncate a pointer to its low 32 bits with a 32-bit register move.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8331 
8332 // Convert compressed oop into int for vectors alignment masking
8333 // in case of 32bit oops (heap < 4Gb).
// Treat a compressed oop as an int. Only legal when the narrow-oop
// encoding shift is zero, so the narrow oop bit pattern equals the
// address's low 32 bits and a plain 32-bit register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format: it previously printed the literal text "dst" (missing
  // the '$' operand sigil) and named "mov" although the encoding emits a
  // 32-bit movw; now consistent with convP2I above.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8347 
8348 
8349 // Convert oop pointer into compressed form
// Compress an oop that may be null; the macro-assembler sequence can
// clobber the condition flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null (skips the null check).
// NOTE(review): cr is declared here but carries no effect() clause,
// unlike encodeHeapOop above — confirm whether KILL cr is intended.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null.
// NOTE(review): cr is declared without an effect() clause — see above.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop statically known to be non-null (or constant),
// skipping the null check in the decode sequence.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8402 
8403 // n.b. AArch64 implementations of encode_klass_not_null and
8404 // decode_klass_not_null do not modify the flags register so, unlike
8405 // Intel, we don't kill CR as a side effect here
8406 
// Compress a Klass* (never null). Per the note above, the AArch64
// implementation does not touch the flags, so no KILL cr is needed.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow Klass (never null). Flags are preserved (see note
// above). The macro assembler has an in-place single-register form which
// is used when the allocator gave dst and src the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // In-place decode when dst and src share a register.
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8440 
// Compiler-only type refinement nodes: each matches a cast of a register
// onto itself, occupies zero bytes of code (size(0)) and emits nothing.
// They exist so the optimizer can pin type information to a value.

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8482 
8483 // ============================================================================
8484 // Atomic operation instructions
8485 //
8486 // Intel and SPARC both implement Ideal Node LoadPLocked and
8487 // Store{PIL}Conditional instructions using a normal load for the
8488 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8489 //
8490 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8491 // pair to lock object allocations from Eden space when not using
8492 // TLABs.
8493 //
8494 // There does not appear to be a Load{IL}Locked Ideal Node and the
8495 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8496 // and to use StoreIConditional only for 32-bit and StoreLConditional
8497 // only for 64-bit.
8498 //
8499 // We implement LoadPLocked and StorePLocked instructions using,
8500 // respectively the AArch64 hw load-exclusive and store-conditional
8501 // instructions. Whereas we must implement each of
8502 // Store{IL}Conditional using a CAS which employs a pair of
8503 // instructions comprising a load-exclusive followed by a
8504 // store-conditional.
8505 
8506 
8507 // Locked-load (linked load) of the current heap-top
8508 // used when updating the eden heap top
8509 // implemented using ldaxr on AArch64
8510 
// Linked (exclusive) load of a pointer via LDAXR, establishing the
// monitor for a following store-conditional (see storePConditional).
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8523 
8524 // Conditional-store of the updated heap-top.
8525 // Used during allocation of the shared heap.
8526 // Sets flag (EQ) on success.
8527 // implemented using stlxr on AArch64.
8528 
// Store-conditional (STLXR) paired with loadPLocked above. The encoding
// writes the STLXR status into rscratch1 and compares it with zr so the
// flags hold EQ on success; oldval is implied by the exclusive monitor.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8548 
8549 
8550 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8551 // when attempting to rebias a lock towards the current thread.  We
8552 // must use the acquire form of cmpxchg in order to guarantee acquire
8553 // semantics in this case.
// 64-bit conditional store implemented as an acquiring CAS (see the
// comment above for why acquire semantics are required); flags hold EQ
// on a successful exchange.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8569 
8570 // storeIConditional also has acquire semantics, for no better reason
8571 // than matching storeLConditional.  At the time of writing this
8572 // comment storeIConditional was not used anywhere by AArch64.
// 32-bit conditional store implemented as an acquiring word CAS
// (see the comment above); flags hold EQ on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8588 
8589 // standard CompareAndSwapX when we are using barriers
8590 // these have higher priority than the rules selected by a predicate
8591 
8592 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8593 // can't match them
8594 
// Boolean-result CAS on a byte: $res <- 1 on success, 0 on failure
// (via cset on EQ). Non-acquiring form; the *Acq variants below are
// selected instead when needs_acquiring_load_exclusive(n) holds.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Boolean-result CAS on a short (halfword).
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Boolean-result CAS on an int (word).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Boolean-result CAS on a long (doubleword).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Boolean-result CAS on a pointer; only matched when the node carries no
// GC barrier data (barrier_data() == 0), otherwise a GC-specific rule wins.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Boolean-result CAS on a narrow (compressed) oop, a 32-bit operation.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8703 
8704 // alternative CompareAndSwapX when we are eliding barriers
8705 
// Acquiring byte CAS: preferred over compareAndSwapB (lower cost) when
// needs_acquiring_load_exclusive(n) holds, so the exchange itself
// supplies acquire semantics and a separate barrier can be elided.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring short CAS (cf. compareAndSwapS).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring int CAS (cf. compareAndSwapI).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring long CAS (cf. compareAndSwapL).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring pointer CAS; also requires no GC barrier data on the node.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS (cf. compareAndSwapN).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8819 
8820 
8821 // ---------------------------------------------------------------------
8822 
8823 
8824 // BEGIN This section of the file is automatically generated. Do not edit --------------
8825 
8826 // Sundry CAS operations.  Note that release is always true,
8827 // regardless of the memory ordering of the CAS.  This is because we
8828 // need the volatile case to be sequentially consistent but there is
8829 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8830 // can't check the type of memory ordering here, so we always emit a
8831 // STLXR.
8832 
8833 // This section is generated from aarch64_ad_cas.m4
8834 
8835 
8836 
// CAS on a byte returning the PREVIOUS value in $res (sign-extended to
// int with sxtbw). TEMP_DEF res keeps the result register distinct from
// the inputs. Release is always true per the section comment above.
// NOTE(review): the format strings in this generated section say "weak"
// although /*weak*/ is false in every encoding — the label looks stale;
// confirm against aarch64_ad_cas.m4 before relying on it.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// CAS on a short returning the previous value, sign-extended with sxthw.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// CAS on an int returning the previous value.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// CAS on a long returning the previous value.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// CAS on a narrow oop (32-bit) returning the previous value.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// CAS on a pointer returning the previous value; only matched when the
// node carries no GC barrier data.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8929 
// Strong CAS with acquire semantics (CompareAndExchangeB), byte.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// The byte result is sign-extended to int via sxtbw.
// Fix: format string said "(byte, weak)" but /*weak*/ is false —
// this is a strong CAS; debug text corrected.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8946 
// Strong CAS with acquire semantics (CompareAndExchangeS), short.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// The halfword result is sign-extended to int via sxthw.
// Fix: format string said "(short, weak)" but /*weak*/ is false —
// this is a strong CAS; debug text corrected.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8963 
8964 
// Strong CAS with acquire semantics (CompareAndExchangeI), int.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// Fix: format string said "(int, weak)" but /*weak*/ is false —
// this is a strong CAS; debug text corrected.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8980 
// Strong CAS with acquire semantics (CompareAndExchangeL), long.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// Fix: format string said "(long, weak)" but /*weak*/ is false —
// this is a strong CAS; debug text corrected.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8996 
8997 
// Strong CAS with acquire semantics (CompareAndExchangeN), narrow oop.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// Fix: format string said "(narrow oop, weak)" but /*weak*/ is false —
// this is a strong CAS; debug text corrected.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9013 
// Strong CAS with acquire semantics (CompareAndExchangeP), pointer.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n)
// and the node carries no GC barrier data.
// Fix: format string said "(ptr, weak)" but /*weak*/ is false —
// this is a strong CAS; debug text corrected.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9029 
// Weak CAS returning success/failure (WeakCompareAndSwapB), byte.
// /*weak*/ true: may fail spuriously. $res is derived from the EQ flag
// (1 on success, 0 on failure), not from the old memory value, so the
// cmpxchg result register is noreg.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9046 
// Weak CAS returning success/failure (WeakCompareAndSwapS), short.
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9063 
// Weak CAS returning success/failure (WeakCompareAndSwapI), int.
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9080 
// Weak CAS returning success/failure (WeakCompareAndSwapL), long.
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9097 
// Weak CAS returning success/failure (WeakCompareAndSwapN), narrow oop.
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9114 
// Weak CAS returning success/failure (WeakCompareAndSwapP), pointer.
// Only matches when no GC barrier data is attached to the node.
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9132 
// Weak CAS with acquire semantics (WeakCompareAndSwapB), byte.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9150 
// Weak CAS with acquire semantics (WeakCompareAndSwapS), short.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9168 
// Weak CAS with acquire semantics (WeakCompareAndSwapI), int.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9186 
// Weak CAS with acquire semantics (WeakCompareAndSwapL), long.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9204 
// Weak CAS with acquire semantics (WeakCompareAndSwapN), narrow oop.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9222 
// Weak CAS with acquire semantics (WeakCompareAndSwapP), pointer.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n)
// and the node carries no GC barrier data.
// /*weak*/ true: may fail spuriously; $res <- (EQ ? 1 : 0).
// Fix: predicate() now precedes match(), consistent with every other
// rule in this section.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9240 
9241 // END This section of the file is automatically generated. Do not edit --------------
9242 // ---------------------------------------------------------------------
9243 
// Atomic exchange (GetAndSetI), int: store $newv to [$mem] and return
// the previous value in $prev.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9253 
// Atomic exchange (GetAndSetL), long: store $newv to [$mem] and return
// the previous value in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9263 
// Atomic exchange (GetAndSetN), narrow oop: word-sized xchg of the
// compressed pointer; previous value returned in $prev.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9273 
// Atomic exchange (GetAndSetP), pointer. Only matches when no GC
// barrier data is attached to the node.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9284 
// Atomic exchange with acquire semantics (GetAndSetI), int.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n);
// uses the acquire form atomic_xchgalw.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9295 
// Atomic exchange with acquire semantics (GetAndSetL), long.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n);
// uses the acquire form atomic_xchgal.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9306 
// Atomic exchange with acquire semantics (GetAndSetN), narrow oop.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9317 
// Atomic exchange with acquire semantics (GetAndSetP), pointer.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n)
// and the node carries no GC barrier data.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9328 
9329 
// Atomic fetch-and-add (GetAndAddL), long, register increment.
// NOTE(review): despite the operand name, $newval receives the value
// previously at [$mem] (atomic_add's first argument is the old-value
// destination) — confirm against MacroAssembler::atomic_add.
// Cost is +1 over the _no_res variant so that variant wins when the
// result is unused.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9339 
// Atomic add (GetAndAddL), long, result unused: matched only when the
// ideal node's result is not consumed; passes noreg so no old value is
// produced. Slightly cheaper than the result-producing rule.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9350 
// Atomic fetch-and-add (GetAndAddL), long, immediate increment
// (immLAddSub: a valid AArch64 add/sub immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9360 
// Atomic add (GetAndAddL), long, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9371 
// Atomic fetch-and-add (GetAndAddI), int, register increment
// (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9381 
// Atomic add (GetAndAddI), int, result unused: noreg suppresses the
// old-value result.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9392 
// Atomic fetch-and-add (GetAndAddI), int, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9402 
// Atomic add (GetAndAddI), int, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9413 
// Atomic fetch-and-add with acquire semantics (GetAndAddL), long.
// Selected (at lower ins_cost) when needs_acquiring_load_exclusive(n);
// uses the acquire form atomic_addal.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9424 
// Atomic add with acquire semantics (GetAndAddL), long, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9435 
// Atomic fetch-and-add with acquire semantics (GetAndAddL), long,
// immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9446 
// Atomic add with acquire semantics (GetAndAddL), long, immediate
// increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9457 
// Atomic fetch-and-add with acquire semantics (GetAndAddI), int.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9468 
// Atomic add with acquire semantics (GetAndAddI), int, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9479 
// Atomic fetch-and-add with acquire semantics (GetAndAddI), int,
// immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9490 
// Atomic add with acquire semantics (GetAndAddI), int, immediate
// increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9501 
9502 // Manifest a CmpL result in an integer register.
9503 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way long compare, register/register:
//   $dst = (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// via cmp; csetw (dst = NE ? 1 : 0); cnegw (negate dst when LT).
// Fix: removed a stale commented-out format line left from an earlier
// revision.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9524 
// Three-way long compare, register/immediate:
//   $dst = (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Fix: corrected a one-space indentation slip before the `if`.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // The AArch64 add/sub immediate field is unsigned, so a negative
    // constant is compared by adding its negation instead of
    // subtracting it.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9549 
9550 // ============================================================================
9551 // Conditional Move Instructions
9552 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9562 
// Conditional move, int, signed compare, both sources in registers.
// csel semantics: $dst = cond ? $src2 : $src1 (note src2 is csel's
// first source operand).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9578 
// Conditional move, int, unsigned compare, both sources in registers.
// Identical encoding to cmovI_reg_reg; duplicated only because cmpOp
// and cmpOpU cannot share an opclass (see note above this section).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9594 
9595 // special cases where one arg is zero
9596 
9597 // n.b. this is selected in preference to the rule above because it
9598 // avoids loading constant 0 into a source register
9599 
9600 // TODO
9601 // we ought only to be able to cull one of these variants as the ideal
9602 // transforms ought always to order the zero consistently (to left/right?)
9603 
// Conditional move, int, signed compare, first source is constant 0:
// $dst = cond ? $src : 0, using zr instead of materializing 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9619 
// Conditional move, int, unsigned compare, first source is constant 0:
// $dst = cond ? $src : 0, using zr instead of materializing 0.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9635 
// Conditional move, int, signed compare, second source is constant 0:
// $dst = cond ? 0 : $src, using zr instead of materializing 0.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9651 
// Conditional move, int, unsigned compare, second source is constant 0:
// $dst = cond ? 0 : $src, using zr instead of materializing 0.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9667 
9668 // special case for creating a boolean 0 or 1
9669 
9670 // n.b. this is selected in preference to the rule above because it
9671 // avoids loading constants 0 and 1 into a source register
9672 
// Conditional move of the constants 1/0, signed compare:
// $dst = cond ? 0 : 1 via csincw (dst = cond ? zr : zr + 1), so
// neither constant needs a source register.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9691 
// Conditional move of the constants 1/0, unsigned compare:
// $dst = cond ? 0 : 1 via csincw (dst = cond ? zr : zr + 1).
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9710 
// Conditional move, long, signed compare, both sources in registers.
// csel semantics: $dst = cond ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9726 
// Long conditional move on an unsigned comparison:
// dst = cond ? src2 : src1 (64-bit csel).
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9742 
9743 // special cases where one arg is zero
9744 
// Long cmove where the taken value is the constant zero:
// dst = cond ? 0 : src, folding the zero into the zr register operand.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9760 
// Unsigned variant of cmovL_reg_zero: dst = cond ? 0 : src using zr.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9776 
// Long cmove where the fall-through value is zero:
// dst = cond ? src : 0, folding the zero into the zr register operand.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9792 
// Unsigned variant of cmovL_zero_reg: dst = cond ? src : 0 using zr.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9808 
// Pointer conditional move on a signed comparison:
// dst = cond ? src2 : src1 (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9824 
// Pointer conditional move on an unsigned comparison:
// dst = cond ? src2 : src1 (64-bit csel).
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9840 
9841 // special cases where one arg is zero
9842 
// Pointer cmove with a null taken value: dst = cond ? null : src,
// folding the null into the zr register operand.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9858 
// Unsigned variant of cmovP_reg_zero: dst = cond ? null : src using zr.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9874 
// Pointer cmove with a null fall-through value: dst = cond ? src : null,
// folding the null into the zr register operand.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9890 
// Unsigned variant of cmovP_zero_reg: dst = cond ? src : null using zr.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9906 
// Compressed-oop (narrow) conditional move on a signed comparison:
// dst = cond ? src2 : src1, using the 32-bit cselw form.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9922 
// Compressed-oop (narrow) conditional move on an unsigned comparison:
// dst = cond ? src2 : src1, using the 32-bit cselw form.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // n.b. this is the unsigned rule, so the disassembly comment says
  // "unsigned" (it previously said "signed", copied from cmovN_reg_reg)
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9938 
9939 // special cases where one arg is zero
9940 
// Narrow-oop cmove with a null taken value: dst = cond ? 0 : src (cselw).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9956 
// Unsigned variant of cmovN_reg_zero: dst = cond ? 0 : src (cselw).
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9972 
// Narrow-oop cmove with a null fall-through value: dst = cond ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9988 
// Unsigned variant of cmovN_zero_reg: dst = cond ? src : 0 (cselw).
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10004 
// Float conditional move on a signed comparison:
// dst = cond ? src2 : src1 via fcsels (single-precision FP csel;
// the first source register is selected when the condition holds).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10022 
// Unsigned-comparison variant of cmovF_reg: dst = cond ? src2 : src1
// via fcsels.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10040 
// Double conditional move on a signed comparison:
// dst = cond ? src2 : src1 via fcseld (double-precision FP csel).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // disassembly comment corrected: this rule handles CMoveD, not float
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10058 
// Unsigned-comparison variant of cmovD_reg: dst = cond ? src2 : src1
// via fcseld (double-precision FP csel).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // disassembly comment corrected: this rule handles CMoveD, not float
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10076 
10077 // ============================================================================
10078 // Arithmetic Instructions
10079 //
10080 
10081 // Integer Addition
10082 
10083 // TODO
10084 // these currently employ operations which do not set CR and hence are
10085 // not flagged as killing CR but we would like to isolate the cases
10086 // where we want to set flags from those where we don't. need to work
10087 // out how to do that.
10088 
// Integer add, register-register: dst = src1 + src2 (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10103 
// Integer add, register-immediate: dst = src1 + src2 where src2 fits the
// add/sub immediate encoding; shares the add/sub-imm encoder with subI.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10117 
// Integer add of an immediate to a narrowed long: matches
// AddI(ConvL2I src1, src2) and performs the add directly in 32 bits,
// making the explicit narrowing conversion free.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10131 
10132 // Pointer Addition
// Pointer add, register-register: dst = src1 + src2 (64-bit add).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10147 
// Pointer add of a sign-extended int offset: matches AddP(src1, ConvI2L
// src2) and fuses the widening into the add's sxtw extended-register form.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10162 
// Pointer add of a scaled long index: matches AddP(src1, LShiftL src2
// scale) and emits a single lea with a shifted register-index address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10177 
// Pointer add of a sign-extended, scaled int index: matches
// AddP(src1, LShiftL(ConvI2L src2, scale)) as one lea using an
// sxtw-extended, scaled register-index address.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10192 
// Left shift of a sign-extended int: matches LShiftL(ConvI2L src, scale)
// and emits a single sbfiz (signed bitfield insert in zero) instead of a
// separate sxtw + lsl pair.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // lsb = scale (mod 64); width = min(32, 64 - scale), i.e. at most the
    // 32 significant bits of the int source.
    // NOTE(review): MIN here is presumed to be a HotSpot utility macro
    // (cf. MIN2) available to the generated ad_aarch64 code — confirm.
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10207 
10208 // Pointer Immediate Addition
10209 // n.b. this needs to be more expensive than using an indirect memory
10210 // operand
// Pointer add of an add/sub-encodable immediate: dst = src1 + src2.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10224 
10225 // Long Addition
// Long add, register-register: dst = src1 + src2 (64-bit add).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10241 
// Long Immediate Addition. No constant pool entries required.
// Long add of an add/sub-encodable immediate: dst = src1 + src2.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10256 
10257 // Integer Subtraction
// Integer subtract, register-register: dst = src1 - src2 (32-bit subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10272 
10273 // Immediate Subtraction
// Integer subtract of an add/sub-encodable immediate: dst = src1 - src2.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10287 
10288 // Long Subtraction
// Long subtract, register-register: dst = src1 - src2 (64-bit sub).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10304 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtract of an add/sub-encodable immediate: dst = src1 - src2.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // fixed format typo: mnemonic and operand were fused as "sub$dst"
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10319 
10320 // Integer Negation (special case for sub)
10321 
// Integer negation: matches SubI(0, src) and emits negw (dst = -src).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10335 
10336 // Long Negation
10337 
// Long negation: matches SubL(0, src) and emits neg (dst = -src).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10351 
10352 // Integer Multiply
10353 
// Integer multiply: dst = src1 * src2 (32-bit mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10368 
// Widening signed multiply: matches MulL(ConvI2L src1, ConvI2L src2)
// and emits a single smull producing the full 64-bit product of two
// 32-bit signed inputs.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10383 
10384 // Long Multiply
10385 
// Long multiply: dst = src1 * src2 (64-bit mul).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10400 
// High half of the 128-bit signed product: dst = (src1 * src2) >> 64,
// emitted as a single smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // fixed format string: removed the stray trailing comma after $src2
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10416 
10417 // Combined Integer Multiply & Add/Sub
10418 
// Fused integer multiply-add: matches AddI(src3, MulI(src1, src2)) and
// emits a single maddw, dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // format mnemonic corrected to the 32-bit form actually emitted (maddw)
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10434 
// Fused integer multiply-subtract: matches SubI(src3, MulI(src1, src2))
// and emits a single msubw, dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // format mnemonic corrected to the 32-bit form actually emitted (msubw)
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10450 
10451 // Combined Integer Multiply & Neg
10452 
// Fused integer multiply-negate: matches MulI((0 - src1), src2) or
// MulI(src1, (0 - src2)) and emits a single mnegw, dst = -(src1 * src2).
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // format mnemonic corrected to the 32-bit form actually emitted (mnegw)
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10468 
10469 // Combined Long Multiply & Add/Sub
10470 
// Fused long multiply-add: dst = src3 + src1 * src2 (single madd).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10486 
// Fused long multiply-subtract: dst = src3 - src1 * src2 (single msub).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10502 
10503 // Combined Long Multiply & Neg
10504 
// Fused long multiply-negate: matches MulL((0 - src1), src2) or
// MulL(src1, (0 - src2)) as a single mneg, dst = -(src1 * src2).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10520 
10521 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10522 
// Widening signed multiply-add: matches AddL(src3, MulL(ConvI2L src1,
// ConvI2L src2)) as a single smaddl, dst = src3 + (long)src1 * src2.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10538 
// Widening signed multiply-subtract: matches SubL(src3, MulL(ConvI2L
// src1, ConvI2L src2)) as a single smsubl, dst = src3 - (long)src1 * src2.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10554 
// Widening signed multiply-negate: matches either negated-operand form
// of MulL over ConvI2L inputs as a single smnegl,
// dst = -((long)src1 * (long)src2).
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10570 
10571 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10572 
// MulAddS2I intrinsic: dst = src1 * src2 + src3 * src4, emitted as a
// mulw into rscratch1 followed by a maddw that accumulates it.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10586 
10587 // Integer Divide
10588 
// Integer divide: dst = src1 / src2 via the shared sdivw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10598 
// Sign-bit extraction: matches (src1 >> 31) >>> 31 (both shift counts
// pinned to 31) and collapses it to a single lsrw by 31, leaving just
// the sign bit in dst.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10608 
// Rounding adjustment for signed division by a power of two:
// dst = src + sign_bit(src), emitted as addw with an LSR #31 shifted
// second operand (adds 1 only when src is negative).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10622 
10623 // Long Divide
10624 
// Long divide: dst = src1 / src2 via the shared sdiv encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10634 
// Long sign-bit extraction: matches (src1 >> 63) >>> 63 and collapses
// it to a single lsr by 63, leaving just the sign bit in dst.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10644 
// Long rounding adjustment for signed division by a power of two:
// dst = src + sign_bit(src), emitted as add with an LSR #63 shifted
// second operand (adds 1 only when src is negative).
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10658 
10659 // Integer Remainder
10660 
// Integer remainder: dst = src1 % src2, computed by the shared encoder
// as sdivw followed by msubw (dst = src1 - (src1 / src2) * src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // fixed format string: removed the stray "(" after msubw
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10671 
10672 // Long Remainder
10673 
// Long remainder: dst = src1 % src2, computed by the shared encoder as
// sdiv followed by msub (dst = src1 - (src1 / src2) * src2).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // fixed format string: removed the stray "(" after msub and added the
  // tab continuation to match modI's two-line listing
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10684 
10685 // Integer Shifts
10686 
10687 // Shift Left Register
// Integer shift left by a register amount: dst = src1 << src2 (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10702 
10703 // Shift Left Immediate
// Integer shift left by an immediate: dst = src1 << (src2 & 0x1f);
// the mask matches Java's 32-bit shift-count semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10718 
10719 // Shift Right Logical Register
// Integer unsigned shift right by a register amount:
// dst = src1 >>> src2 (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10734 
10735 // Shift Right Logical Immediate
10736 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10737   match(Set dst (URShiftI src1 src2));
10738 
10739   ins_cost(INSN_COST);
10740   format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
10741 
10742   ins_encode %{
10743     __ lsrw(as_Register($dst$$reg),
10744             as_Register($src1$$reg),
10745             $src2$$constant & 0x1f);
10746   %}
10747 
10748   ins_pipe(ialu_reg_shift);
10749 %}
10750 
10751 // Shift Right Arithmetic Register
10752 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10753   match(Set dst (RShiftI src1 src2));
10754 
10755   ins_cost(INSN_COST * 2);
10756   format %{ "asrvw  $dst, $src1, $src2" %}
10757 
10758   ins_encode %{
10759     __ asrvw(as_Register($dst$$reg),
10760              as_Register($src1$$reg),
10761              as_Register($src2$$reg));
10762   %}
10763 
10764   ins_pipe(ialu_reg_reg_vshift);
10765 %}
10766 
10767 // Shift Right Arithmetic Immediate
10768 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10769   match(Set dst (RShiftI src1 src2));
10770 
10771   ins_cost(INSN_COST);
10772   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
10773 
10774   ins_encode %{
10775     __ asrw(as_Register($dst$$reg),
10776             as_Register($src1$$reg),
10777             $src2$$constant & 0x1f);
10778   %}
10779 
10780   ins_pipe(ialu_reg_shift);
10781 %}
10782 
10783 // Combined Int Mask and Right Shift (using UBFM)
10784 // TODO
10785 
10786 // Long Shifts
10787 
// Shift Left Register
// Variable-count 64-bit shift. The count operand is iRegIorL2I (an int,
// per Java long-shift semantics); no explicit masking is emitted —
// AArch64 LSLV takes the count modulo 64 (see Arm ARM, LSLV), matching
// Java's (count & 0x3f).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Immediate count masked to 6 bits (0x3f), mirroring Java's long-shift
// semantics for out-of-range counts.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// Variable-count unsigned 64-bit shift; count handling as in
// lShiftL_reg_reg.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Immediate count masked to 6 bits, as for lShiftL_reg_imm.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Matches an unsigned shift of a pointer reinterpreted as a long
// (CastP2X), i.e. dst = (uintptr_t)src1 >> (src2 & 0x3f). CastP2X is
// free here: the pointer register is simply read as a 64-bit value.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10867 
// Shift Right Arithmetic Register
// Variable-count signed 64-bit shift. Count is an int register
// (iRegIorL2I); no explicit masking is emitted — AArch64 ASRV takes the
// count modulo 64 (see Arm ARM, ASRV), matching Java's (count & 0x3f).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Immediate count masked to 6 bits (0x3f), mirroring Java's long-shift
// semantics for out-of-range counts.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10899 
10900 // BEGIN This section of the file is automatically generated. Do not edit --------------
10901 
10902 instruct regL_not_reg(iRegLNoSp dst,
10903                          iRegL src1, immL_M1 m1,
10904                          rFlagsReg cr) %{
10905   match(Set dst (XorL src1 m1));
10906   ins_cost(INSN_COST);
10907   format %{ "eon  $dst, $src1, zr" %}
10908 
10909   ins_encode %{
10910     __ eon(as_Register($dst$$reg),
10911               as_Register($src1$$reg),
10912               zr,
10913               Assembler::LSL, 0);
10914   %}
10915 
10916   ins_pipe(ialu_reg);
10917 %}
10918 instruct regI_not_reg(iRegINoSp dst,
10919                          iRegIorL2I src1, immI_M1 m1,
10920                          rFlagsReg cr) %{
10921   match(Set dst (XorI src1 m1));
10922   ins_cost(INSN_COST);
10923   format %{ "eonw  $dst, $src1, zr" %}
10924 
10925   ins_encode %{
10926     __ eonw(as_Register($dst$$reg),
10927               as_Register($src1$$reg),
10928               zr,
10929               Assembler::LSL, 0);
10930   %}
10931 
10932   ins_pipe(ialu_reg);
10933 %}
10934 
10935 instruct AndI_reg_not_reg(iRegINoSp dst,
10936                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10937                          rFlagsReg cr) %{
10938   match(Set dst (AndI src1 (XorI src2 m1)));
10939   ins_cost(INSN_COST);
10940   format %{ "bicw  $dst, $src1, $src2" %}
10941 
10942   ins_encode %{
10943     __ bicw(as_Register($dst$$reg),
10944               as_Register($src1$$reg),
10945               as_Register($src2$$reg),
10946               Assembler::LSL, 0);
10947   %}
10948 
10949   ins_pipe(ialu_reg_reg);
10950 %}
10951 
10952 instruct AndL_reg_not_reg(iRegLNoSp dst,
10953                          iRegL src1, iRegL src2, immL_M1 m1,
10954                          rFlagsReg cr) %{
10955   match(Set dst (AndL src1 (XorL src2 m1)));
10956   ins_cost(INSN_COST);
10957   format %{ "bic  $dst, $src1, $src2" %}
10958 
10959   ins_encode %{
10960     __ bic(as_Register($dst$$reg),
10961               as_Register($src1$$reg),
10962               as_Register($src2$$reg),
10963               Assembler::LSL, 0);
10964   %}
10965 
10966   ins_pipe(ialu_reg_reg);
10967 %}
10968 
10969 instruct OrI_reg_not_reg(iRegINoSp dst,
10970                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10971                          rFlagsReg cr) %{
10972   match(Set dst (OrI src1 (XorI src2 m1)));
10973   ins_cost(INSN_COST);
10974   format %{ "ornw  $dst, $src1, $src2" %}
10975 
10976   ins_encode %{
10977     __ ornw(as_Register($dst$$reg),
10978               as_Register($src1$$reg),
10979               as_Register($src2$$reg),
10980               Assembler::LSL, 0);
10981   %}
10982 
10983   ins_pipe(ialu_reg_reg);
10984 %}
10985 
10986 instruct OrL_reg_not_reg(iRegLNoSp dst,
10987                          iRegL src1, iRegL src2, immL_M1 m1,
10988                          rFlagsReg cr) %{
10989   match(Set dst (OrL src1 (XorL src2 m1)));
10990   ins_cost(INSN_COST);
10991   format %{ "orn  $dst, $src1, $src2" %}
10992 
10993   ins_encode %{
10994     __ orn(as_Register($dst$$reg),
10995               as_Register($src1$$reg),
10996               as_Register($src2$$reg),
10997               Assembler::LSL, 0);
10998   %}
10999 
11000   ins_pipe(ialu_reg_reg);
11001 %}
11002 
11003 instruct XorI_reg_not_reg(iRegINoSp dst,
11004                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11005                          rFlagsReg cr) %{
11006   match(Set dst (XorI m1 (XorI src2 src1)));
11007   ins_cost(INSN_COST);
11008   format %{ "eonw  $dst, $src1, $src2" %}
11009 
11010   ins_encode %{
11011     __ eonw(as_Register($dst$$reg),
11012               as_Register($src1$$reg),
11013               as_Register($src2$$reg),
11014               Assembler::LSL, 0);
11015   %}
11016 
11017   ins_pipe(ialu_reg_reg);
11018 %}
11019 
11020 instruct XorL_reg_not_reg(iRegLNoSp dst,
11021                          iRegL src1, iRegL src2, immL_M1 m1,
11022                          rFlagsReg cr) %{
11023   match(Set dst (XorL m1 (XorL src2 src1)));
11024   ins_cost(INSN_COST);
11025   format %{ "eon  $dst, $src1, $src2" %}
11026 
11027   ins_encode %{
11028     __ eon(as_Register($dst$$reg),
11029               as_Register($src1$$reg),
11030               as_Register($src2$$reg),
11031               Assembler::LSL, 0);
11032   %}
11033 
11034   ins_pipe(ialu_reg_reg);
11035 %}
11036 
11037 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
11038                          iRegIorL2I src1, iRegIorL2I src2,
11039                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11040   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
11041   ins_cost(1.9 * INSN_COST);
11042   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
11043 
11044   ins_encode %{
11045     __ bicw(as_Register($dst$$reg),
11046               as_Register($src1$$reg),
11047               as_Register($src2$$reg),
11048               Assembler::LSR,
11049               $src3$$constant & 0x1f);
11050   %}
11051 
11052   ins_pipe(ialu_reg_reg_shift);
11053 %}
11054 
11055 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11056                          iRegL src1, iRegL src2,
11057                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11058   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11059   ins_cost(1.9 * INSN_COST);
11060   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11061 
11062   ins_encode %{
11063     __ bic(as_Register($dst$$reg),
11064               as_Register($src1$$reg),
11065               as_Register($src2$$reg),
11066               Assembler::LSR,
11067               $src3$$constant & 0x3f);
11068   %}
11069 
11070   ins_pipe(ialu_reg_reg_shift);
11071 %}
11072 
11073 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11074                          iRegIorL2I src1, iRegIorL2I src2,
11075                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11076   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11077   ins_cost(1.9 * INSN_COST);
11078   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11079 
11080   ins_encode %{
11081     __ bicw(as_Register($dst$$reg),
11082               as_Register($src1$$reg),
11083               as_Register($src2$$reg),
11084               Assembler::ASR,
11085               $src3$$constant & 0x1f);
11086   %}
11087 
11088   ins_pipe(ialu_reg_reg_shift);
11089 %}
11090 
11091 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11092                          iRegL src1, iRegL src2,
11093                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11094   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11095   ins_cost(1.9 * INSN_COST);
11096   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11097 
11098   ins_encode %{
11099     __ bic(as_Register($dst$$reg),
11100               as_Register($src1$$reg),
11101               as_Register($src2$$reg),
11102               Assembler::ASR,
11103               $src3$$constant & 0x3f);
11104   %}
11105 
11106   ins_pipe(ialu_reg_reg_shift);
11107 %}
11108 
11109 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11110                          iRegIorL2I src1, iRegIorL2I src2,
11111                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11112   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11113   ins_cost(1.9 * INSN_COST);
11114   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11115 
11116   ins_encode %{
11117     __ bicw(as_Register($dst$$reg),
11118               as_Register($src1$$reg),
11119               as_Register($src2$$reg),
11120               Assembler::LSL,
11121               $src3$$constant & 0x1f);
11122   %}
11123 
11124   ins_pipe(ialu_reg_reg_shift);
11125 %}
11126 
11127 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11128                          iRegL src1, iRegL src2,
11129                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11130   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11131   ins_cost(1.9 * INSN_COST);
11132   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11133 
11134   ins_encode %{
11135     __ bic(as_Register($dst$$reg),
11136               as_Register($src1$$reg),
11137               as_Register($src2$$reg),
11138               Assembler::LSL,
11139               $src3$$constant & 0x3f);
11140   %}
11141 
11142   ins_pipe(ialu_reg_reg_shift);
11143 %}
11144 
11145 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
11146                          iRegIorL2I src1, iRegIorL2I src2,
11147                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11148   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
11149   ins_cost(1.9 * INSN_COST);
11150   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
11151 
11152   ins_encode %{
11153     __ eonw(as_Register($dst$$reg),
11154               as_Register($src1$$reg),
11155               as_Register($src2$$reg),
11156               Assembler::LSR,
11157               $src3$$constant & 0x1f);
11158   %}
11159 
11160   ins_pipe(ialu_reg_reg_shift);
11161 %}
11162 
11163 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
11164                          iRegL src1, iRegL src2,
11165                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11166   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
11167   ins_cost(1.9 * INSN_COST);
11168   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
11169 
11170   ins_encode %{
11171     __ eon(as_Register($dst$$reg),
11172               as_Register($src1$$reg),
11173               as_Register($src2$$reg),
11174               Assembler::LSR,
11175               $src3$$constant & 0x3f);
11176   %}
11177 
11178   ins_pipe(ialu_reg_reg_shift);
11179 %}
11180 
11181 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
11182                          iRegIorL2I src1, iRegIorL2I src2,
11183                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11184   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
11185   ins_cost(1.9 * INSN_COST);
11186   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
11187 
11188   ins_encode %{
11189     __ eonw(as_Register($dst$$reg),
11190               as_Register($src1$$reg),
11191               as_Register($src2$$reg),
11192               Assembler::ASR,
11193               $src3$$constant & 0x1f);
11194   %}
11195 
11196   ins_pipe(ialu_reg_reg_shift);
11197 %}
11198 
11199 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
11200                          iRegL src1, iRegL src2,
11201                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11202   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
11203   ins_cost(1.9 * INSN_COST);
11204   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
11205 
11206   ins_encode %{
11207     __ eon(as_Register($dst$$reg),
11208               as_Register($src1$$reg),
11209               as_Register($src2$$reg),
11210               Assembler::ASR,
11211               $src3$$constant & 0x3f);
11212   %}
11213 
11214   ins_pipe(ialu_reg_reg_shift);
11215 %}
11216 
11217 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
11218                          iRegIorL2I src1, iRegIorL2I src2,
11219                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11220   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
11221   ins_cost(1.9 * INSN_COST);
11222   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
11223 
11224   ins_encode %{
11225     __ eonw(as_Register($dst$$reg),
11226               as_Register($src1$$reg),
11227               as_Register($src2$$reg),
11228               Assembler::LSL,
11229               $src3$$constant & 0x1f);
11230   %}
11231 
11232   ins_pipe(ialu_reg_reg_shift);
11233 %}
11234 
11235 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
11236                          iRegL src1, iRegL src2,
11237                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11238   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
11239   ins_cost(1.9 * INSN_COST);
11240   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
11241 
11242   ins_encode %{
11243     __ eon(as_Register($dst$$reg),
11244               as_Register($src1$$reg),
11245               as_Register($src2$$reg),
11246               Assembler::LSL,
11247               $src3$$constant & 0x3f);
11248   %}
11249 
11250   ins_pipe(ialu_reg_reg_shift);
11251 %}
11252 
11253 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
11254                          iRegIorL2I src1, iRegIorL2I src2,
11255                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11256   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
11257   ins_cost(1.9 * INSN_COST);
11258   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
11259 
11260   ins_encode %{
11261     __ ornw(as_Register($dst$$reg),
11262               as_Register($src1$$reg),
11263               as_Register($src2$$reg),
11264               Assembler::LSR,
11265               $src3$$constant & 0x1f);
11266   %}
11267 
11268   ins_pipe(ialu_reg_reg_shift);
11269 %}
11270 
11271 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
11272                          iRegL src1, iRegL src2,
11273                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11274   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
11275   ins_cost(1.9 * INSN_COST);
11276   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
11277 
11278   ins_encode %{
11279     __ orn(as_Register($dst$$reg),
11280               as_Register($src1$$reg),
11281               as_Register($src2$$reg),
11282               Assembler::LSR,
11283               $src3$$constant & 0x3f);
11284   %}
11285 
11286   ins_pipe(ialu_reg_reg_shift);
11287 %}
11288 
11289 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
11290                          iRegIorL2I src1, iRegIorL2I src2,
11291                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11292   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
11293   ins_cost(1.9 * INSN_COST);
11294   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
11295 
11296   ins_encode %{
11297     __ ornw(as_Register($dst$$reg),
11298               as_Register($src1$$reg),
11299               as_Register($src2$$reg),
11300               Assembler::ASR,
11301               $src3$$constant & 0x1f);
11302   %}
11303 
11304   ins_pipe(ialu_reg_reg_shift);
11305 %}
11306 
11307 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
11308                          iRegL src1, iRegL src2,
11309                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11310   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
11311   ins_cost(1.9 * INSN_COST);
11312   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
11313 
11314   ins_encode %{
11315     __ orn(as_Register($dst$$reg),
11316               as_Register($src1$$reg),
11317               as_Register($src2$$reg),
11318               Assembler::ASR,
11319               $src3$$constant & 0x3f);
11320   %}
11321 
11322   ins_pipe(ialu_reg_reg_shift);
11323 %}
11324 
11325 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
11326                          iRegIorL2I src1, iRegIorL2I src2,
11327                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11328   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
11329   ins_cost(1.9 * INSN_COST);
11330   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
11331 
11332   ins_encode %{
11333     __ ornw(as_Register($dst$$reg),
11334               as_Register($src1$$reg),
11335               as_Register($src2$$reg),
11336               Assembler::LSL,
11337               $src3$$constant & 0x1f);
11338   %}
11339 
11340   ins_pipe(ialu_reg_reg_shift);
11341 %}
11342 
11343 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
11344                          iRegL src1, iRegL src2,
11345                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11346   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
11347   ins_cost(1.9 * INSN_COST);
11348   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
11349 
11350   ins_encode %{
11351     __ orn(as_Register($dst$$reg),
11352               as_Register($src1$$reg),
11353               as_Register($src2$$reg),
11354               Assembler::LSL,
11355               $src3$$constant & 0x3f);
11356   %}
11357 
11358   ins_pipe(ialu_reg_reg_shift);
11359 %}
11360 
11361 instruct AndI_reg_URShift_reg(iRegINoSp dst,
11362                          iRegIorL2I src1, iRegIorL2I src2,
11363                          immI src3, rFlagsReg cr) %{
11364   match(Set dst (AndI src1 (URShiftI src2 src3)));
11365 
11366   ins_cost(1.9 * INSN_COST);
11367   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
11368 
11369   ins_encode %{
11370     __ andw(as_Register($dst$$reg),
11371               as_Register($src1$$reg),
11372               as_Register($src2$$reg),
11373               Assembler::LSR,
11374               $src3$$constant & 0x1f);
11375   %}
11376 
11377   ins_pipe(ialu_reg_reg_shift);
11378 %}
11379 
11380 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
11381                          iRegL src1, iRegL src2,
11382                          immI src3, rFlagsReg cr) %{
11383   match(Set dst (AndL src1 (URShiftL src2 src3)));
11384 
11385   ins_cost(1.9 * INSN_COST);
11386   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
11387 
11388   ins_encode %{
11389     __ andr(as_Register($dst$$reg),
11390               as_Register($src1$$reg),
11391               as_Register($src2$$reg),
11392               Assembler::LSR,
11393               $src3$$constant & 0x3f);
11394   %}
11395 
11396   ins_pipe(ialu_reg_reg_shift);
11397 %}
11398 
11399 instruct AndI_reg_RShift_reg(iRegINoSp dst,
11400                          iRegIorL2I src1, iRegIorL2I src2,
11401                          immI src3, rFlagsReg cr) %{
11402   match(Set dst (AndI src1 (RShiftI src2 src3)));
11403 
11404   ins_cost(1.9 * INSN_COST);
11405   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
11406 
11407   ins_encode %{
11408     __ andw(as_Register($dst$$reg),
11409               as_Register($src1$$reg),
11410               as_Register($src2$$reg),
11411               Assembler::ASR,
11412               $src3$$constant & 0x1f);
11413   %}
11414 
11415   ins_pipe(ialu_reg_reg_shift);
11416 %}
11417 
11418 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
11419                          iRegL src1, iRegL src2,
11420                          immI src3, rFlagsReg cr) %{
11421   match(Set dst (AndL src1 (RShiftL src2 src3)));
11422 
11423   ins_cost(1.9 * INSN_COST);
11424   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
11425 
11426   ins_encode %{
11427     __ andr(as_Register($dst$$reg),
11428               as_Register($src1$$reg),
11429               as_Register($src2$$reg),
11430               Assembler::ASR,
11431               $src3$$constant & 0x3f);
11432   %}
11433 
11434   ins_pipe(ialu_reg_reg_shift);
11435 %}
11436 
11437 instruct AndI_reg_LShift_reg(iRegINoSp dst,
11438                          iRegIorL2I src1, iRegIorL2I src2,
11439                          immI src3, rFlagsReg cr) %{
11440   match(Set dst (AndI src1 (LShiftI src2 src3)));
11441 
11442   ins_cost(1.9 * INSN_COST);
11443   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
11444 
11445   ins_encode %{
11446     __ andw(as_Register($dst$$reg),
11447               as_Register($src1$$reg),
11448               as_Register($src2$$reg),
11449               Assembler::LSL,
11450               $src3$$constant & 0x1f);
11451   %}
11452 
11453   ins_pipe(ialu_reg_reg_shift);
11454 %}
11455 
11456 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
11457                          iRegL src1, iRegL src2,
11458                          immI src3, rFlagsReg cr) %{
11459   match(Set dst (AndL src1 (LShiftL src2 src3)));
11460 
11461   ins_cost(1.9 * INSN_COST);
11462   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
11463 
11464   ins_encode %{
11465     __ andr(as_Register($dst$$reg),
11466               as_Register($src1$$reg),
11467               as_Register($src2$$reg),
11468               Assembler::LSL,
11469               $src3$$constant & 0x3f);
11470   %}
11471 
11472   ins_pipe(ialu_reg_reg_shift);
11473 %}
11474 
11475 instruct XorI_reg_URShift_reg(iRegINoSp dst,
11476                          iRegIorL2I src1, iRegIorL2I src2,
11477                          immI src3, rFlagsReg cr) %{
11478   match(Set dst (XorI src1 (URShiftI src2 src3)));
11479 
11480   ins_cost(1.9 * INSN_COST);
11481   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
11482 
11483   ins_encode %{
11484     __ eorw(as_Register($dst$$reg),
11485               as_Register($src1$$reg),
11486               as_Register($src2$$reg),
11487               Assembler::LSR,
11488               $src3$$constant & 0x1f);
11489   %}
11490 
11491   ins_pipe(ialu_reg_reg_shift);
11492 %}
11493 
11494 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
11495                          iRegL src1, iRegL src2,
11496                          immI src3, rFlagsReg cr) %{
11497   match(Set dst (XorL src1 (URShiftL src2 src3)));
11498 
11499   ins_cost(1.9 * INSN_COST);
11500   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
11501 
11502   ins_encode %{
11503     __ eor(as_Register($dst$$reg),
11504               as_Register($src1$$reg),
11505               as_Register($src2$$reg),
11506               Assembler::LSR,
11507               $src3$$constant & 0x3f);
11508   %}
11509 
11510   ins_pipe(ialu_reg_reg_shift);
11511 %}
11512 
11513 instruct XorI_reg_RShift_reg(iRegINoSp dst,
11514                          iRegIorL2I src1, iRegIorL2I src2,
11515                          immI src3, rFlagsReg cr) %{
11516   match(Set dst (XorI src1 (RShiftI src2 src3)));
11517 
11518   ins_cost(1.9 * INSN_COST);
11519   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
11520 
11521   ins_encode %{
11522     __ eorw(as_Register($dst$$reg),
11523               as_Register($src1$$reg),
11524               as_Register($src2$$reg),
11525               Assembler::ASR,
11526               $src3$$constant & 0x1f);
11527   %}
11528 
11529   ins_pipe(ialu_reg_reg_shift);
11530 %}
11531 
11532 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
11533                          iRegL src1, iRegL src2,
11534                          immI src3, rFlagsReg cr) %{
11535   match(Set dst (XorL src1 (RShiftL src2 src3)));
11536 
11537   ins_cost(1.9 * INSN_COST);
11538   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
11539 
11540   ins_encode %{
11541     __ eor(as_Register($dst$$reg),
11542               as_Register($src1$$reg),
11543               as_Register($src2$$reg),
11544               Assembler::ASR,
11545               $src3$$constant & 0x3f);
11546   %}
11547 
11548   ins_pipe(ialu_reg_reg_shift);
11549 %}
11550 
11551 instruct XorI_reg_LShift_reg(iRegINoSp dst,
11552                          iRegIorL2I src1, iRegIorL2I src2,
11553                          immI src3, rFlagsReg cr) %{
11554   match(Set dst (XorI src1 (LShiftI src2 src3)));
11555 
11556   ins_cost(1.9 * INSN_COST);
11557   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
11558 
11559   ins_encode %{
11560     __ eorw(as_Register($dst$$reg),
11561               as_Register($src1$$reg),
11562               as_Register($src2$$reg),
11563               Assembler::LSL,
11564               $src3$$constant & 0x1f);
11565   %}
11566 
11567   ins_pipe(ialu_reg_reg_shift);
11568 %}
11569 
11570 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11571                          iRegL src1, iRegL src2,
11572                          immI src3, rFlagsReg cr) %{
11573   match(Set dst (XorL src1 (LShiftL src2 src3)));
11574 
11575   ins_cost(1.9 * INSN_COST);
11576   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11577 
11578   ins_encode %{
11579     __ eor(as_Register($dst$$reg),
11580               as_Register($src1$$reg),
11581               as_Register($src2$$reg),
11582               Assembler::LSL,
11583               $src3$$constant & 0x3f);
11584   %}
11585 
11586   ins_pipe(ialu_reg_reg_shift);
11587 %}
11588 
11589 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11590                          iRegIorL2I src1, iRegIorL2I src2,
11591                          immI src3, rFlagsReg cr) %{
11592   match(Set dst (OrI src1 (URShiftI src2 src3)));
11593 
11594   ins_cost(1.9 * INSN_COST);
11595   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11596 
11597   ins_encode %{
11598     __ orrw(as_Register($dst$$reg),
11599               as_Register($src1$$reg),
11600               as_Register($src2$$reg),
11601               Assembler::LSR,
11602               $src3$$constant & 0x1f);
11603   %}
11604 
11605   ins_pipe(ialu_reg_reg_shift);
11606 %}
11607 
11608 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11609                          iRegL src1, iRegL src2,
11610                          immI src3, rFlagsReg cr) %{
11611   match(Set dst (OrL src1 (URShiftL src2 src3)));
11612 
11613   ins_cost(1.9 * INSN_COST);
11614   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11615 
11616   ins_encode %{
11617     __ orr(as_Register($dst$$reg),
11618               as_Register($src1$$reg),
11619               as_Register($src2$$reg),
11620               Assembler::LSR,
11621               $src3$$constant & 0x3f);
11622   %}
11623 
11624   ins_pipe(ialu_reg_reg_shift);
11625 %}
11626 
// Fused OR with shifted operand: match an Or whose second input is a
// constant-shifted value and emit a single AArch64 shifted-register
// "orr(w) Rd, Rn, Rm, <shift> #imm" instruction.  The shift immediate is
// masked to the operand width (0x1f for 32-bit, 0x3f for 64-bit), matching
// Java shift semantics.
// NOTE(review): operand `cr` appears in the signature but is not used by
// the match rule or encoding — presumably conservative; confirm.

// OrI with arithmetic-right-shifted operand (32-bit).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// OrL with arithmetic-right-shifted operand (64-bit).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// OrI with left-shifted operand (32-bit).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// OrL with left-shifted operand (64-bit).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11702 
// Fused ADD with shifted operand: match an Add whose second input is a
// constant-shifted value and emit a single AArch64 shifted-register
// "add(w) Rd, Rn, Rm, <shift> #imm".  Shift amount is masked to operand
// width (0x1f / 0x3f) per Java shift semantics.
// NOTE(review): `cr` is declared but unreferenced in these patterns.

// AddI with logical-right-shifted operand (32-bit).
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// AddL with logical-right-shifted operand (64-bit).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// AddI with arithmetic-right-shifted operand (32-bit).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// AddL with arithmetic-right-shifted operand (64-bit).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// AddI with left-shifted operand (32-bit).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// AddL with left-shifted operand (64-bit).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11816 
// Fused SUB with shifted operand: match a Sub whose subtrahend is a
// constant-shifted value and emit a single AArch64 shifted-register
// "sub(w) Rd, Rn, Rm, <shift> #imm".  Shift amount masked to operand
// width (0x1f / 0x3f) per Java shift semantics.
// NOTE(review): `cr` is declared but unreferenced in these patterns.

// SubI with logical-right-shifted operand (32-bit).
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// SubL with logical-right-shifted operand (64-bit).
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// SubI with arithmetic-right-shifted operand (32-bit).
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// SubL with arithmetic-right-shifted operand (64-bit).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// SubI with left-shifted operand (32-bit).
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// SubL with left-shifted operand (64-bit).
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11930 
11931 
11932 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift collapses to a single bitfield-move:
// SBFM/UBFM with immr = (rshift - lshift) mod width and
// imms = width-1 - lshift.  Signed right shift -> SBFM, unsigned -> UBFM.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Mask counts to 6 bits (64-bit shift semantics), then derive the
    // SBFM rotate (r) and most-significant source bit (s).
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit signed variant; counts masked to 5 bits.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit unsigned variant: (src << l) >>> r becomes UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask
// (src >>> rshift) & mask, where mask is 2^k - 1 (guaranteed by
// imm*_bitmask), is a single UBFX extracting k bits starting at rshift.
// The predicate checks that rshift + field width fits in the register.

// 32-bit unsigned bitfield extract.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1))

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // field width in bits
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);  // field width in bits
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12048 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, so the explicit
// ConvI2L is absorbed into the same instruction.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // field width in bits
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12068 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift, mask = 2^k - 1, is UBFIZ: insert a k-bit field
// at bit position lshift, zeroing the rest.  The predicate ensures the
// shifted field still fits in the register.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // field width in bits
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of the pattern above.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);  // field width in bits
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12105 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (the 64-bit ubfiz zero-extends the inserted field, absorbing the ConvI2L).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // field width in bits
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12123 
// Rotations
// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == width
// (checked by the predicate) is an EXTR — extract a register pair with a
// rotate amount of rshift.  Add with the same shape is equivalent because
// the two shifted values have no overlapping bits.
// NOTE(review): `cr` is declared but unreferenced in these patterns.

// 64-bit Or-based rotate.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit Or-based rotate.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 64-bit Add-based rotate (same bit pattern as the Or form).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit Add-based rotate.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12185 
12186 
// rol expander
// AArch64 has no rotate-left-by-register, so rotate left by `shift` is
// emitted as rotate right by (-shift), computed into rscratch1.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorv uses only the low bits of the count.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of the rotate-left-via-negated-rotate-right expansion.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Match (x << s) | (x >>> (64 - s)) — the variable rotate-left idiom —
// and expand to rolL_rReg.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s); the shifter masks counts, so 0 - s and
// 64 - s produce the same rotate.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with (32 - s).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with (0 - s).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12254 
// ror expander
// Rotate right has direct hardware support (RORV), so no negation of the
// shift count is needed — compare the rol expanders above it in this file.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Match (x >>> s) | (x << (64 - s)) — the variable rotate-right idiom —
// and expand to rorL_rReg.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s).
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with (32 - s).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with (0 - s).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12320 
// Add/subtract (extended)
// Fold an explicit int-to-long conversion of the second operand into the
// extended-register form of add/sub ("add Rd, Rn, Wm, sxtw").

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12348 
12349 
// Add with sign/zero-extended operand: (x << k) >> k (or >>> k) is how the
// compiler expresses a sub-word extension, and it folds into the
// extended-register form of add.  k = 16 -> sxth, 24 -> sxtb/uxtb on
// 32-bit values; k = 48/32/56 -> sxth/sxtw/sxtb/uxtb on 64-bit values.

// addw Rd, Rn, Wm, sxth — add sign-extended halfword (32-bit).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw Rd, Rn, Wm, sxtb — add sign-extended byte (32-bit).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw Rd, Rn, Wm, uxtb — add zero-extended byte (32-bit).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, sxth — add sign-extended halfword (64-bit).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, sxtw — add sign-extended word (64-bit).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, sxtb — add sign-extended byte (64-bit).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, uxtb — add zero-extended byte (64-bit).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12440 
12441 
// Add with zero-extension expressed as a mask: (x & 0xff / 0xffff /
// 0xffffffff) folds into the uxtb/uxth/uxtw extended-register add.

// addw Rd, Rn, Wm, uxtb — mask 0xff.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw Rd, Rn, Wm, uxth — mask 0xffff.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, uxtb — 64-bit, mask 0xff.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, uxth — 64-bit, mask 0xffff.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add Rd, Rn, Xm, uxtw — 64-bit, mask 0xffffffff.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12506 
// Sub with zero-extension expressed as a mask — mirror of the AddExt*_and
// family: (x & 0xff / 0xffff / 0xffffffff) folds into uxtb/uxth/uxtw
// extended-register sub.

// subw Rd, Rn, Wm, uxtb — mask 0xff.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// subw Rd, Rn, Wm, uxth — mask 0xffff.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// sub Rd, Rn, Xm, uxtb — 64-bit, mask 0xff.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// sub Rd, Rn, Xm, uxth — 64-bit, mask 0xffff.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// sub Rd, Rn, Xm, uxtw — 64-bit, mask 0xffffffff.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12571 
12572 
12573 instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12574 %{
12575   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12576   ins_cost(1.9 * INSN_COST);
12577   format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}
12578 
12579    ins_encode %{
12580      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12581             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12582    %}
12583   ins_pipe(ialu_reg_reg_shift);
12584 %}
12585 
12586 instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12587 %{
12588   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12589   ins_cost(1.9 * INSN_COST);
12590   format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}
12591 
12592    ins_encode %{
12593      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12594             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12595    %}
12596   ins_pipe(ialu_reg_reg_shift);
12597 %}
12598 
12599 instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12600 %{
12601   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12602   ins_cost(1.9 * INSN_COST);
12603   format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}
12604 
12605    ins_encode %{
12606      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12607             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12608    %}
12609   ins_pipe(ialu_reg_reg_shift);
12610 %}
12611 
12612 instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12613 %{
12614   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12615   ins_cost(1.9 * INSN_COST);
12616   format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}
12617 
12618    ins_encode %{
12619      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12620             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12621    %}
12622   ins_pipe(ialu_reg_reg_shift);
12623 %}
12624 
12625 instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12626 %{
12627   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12628   ins_cost(1.9 * INSN_COST);
12629   format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}
12630 
12631    ins_encode %{
12632      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12633             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12634    %}
12635   ins_pipe(ialu_reg_reg_shift);
12636 %}
12637 
12638 instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12639 %{
12640   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12641   ins_cost(1.9 * INSN_COST);
12642   format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}
12643 
12644    ins_encode %{
12645      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12646             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12647    %}
12648   ins_pipe(ialu_reg_reg_shift);
12649 %}
12650 
12651 instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12652 %{
12653   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12654   ins_cost(1.9 * INSN_COST);
12655   format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}
12656 
12657    ins_encode %{
12658      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12659             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12660    %}
12661   ins_pipe(ialu_reg_reg_shift);
12662 %}
12663 
12664 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12665 %{
12666   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12667   ins_cost(1.9 * INSN_COST);
12668   format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
12669 
12670    ins_encode %{
12671      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12672             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12673    %}
12674   ins_pipe(ialu_reg_reg_shift);
12675 %}
12676 
12677 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12678 %{
12679   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12680   ins_cost(1.9 * INSN_COST);
12681   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
12682 
12683    ins_encode %{
12684      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12685             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12686    %}
12687   ins_pipe(ialu_reg_reg_shift);
12688 %}
12689 
12690 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12691 %{
12692   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12693   ins_cost(1.9 * INSN_COST);
12694   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
12695 
12696    ins_encode %{
12697      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12698             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12699    %}
12700   ins_pipe(ialu_reg_reg_shift);
12701 %}
12702 
12703 
12704 instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12705 %{
12706   match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
12707   ins_cost(1.9 * INSN_COST);
12708   format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}
12709 
12710    ins_encode %{
12711      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12712             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12713    %}
12714   ins_pipe(ialu_reg_reg_shift);
12715 %};
12716 
12717 instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12718 %{
12719   match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
12720   ins_cost(1.9 * INSN_COST);
12721   format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}
12722 
12723    ins_encode %{
12724      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12725             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12726    %}
12727   ins_pipe(ialu_reg_reg_shift);
12728 %};
12729 
12730 
12731 instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
12732 %{
12733   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12734   ins_cost(1.9 * INSN_COST);
12735   format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}
12736 
12737    ins_encode %{
12738      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12739             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12740    %}
12741   ins_pipe(ialu_reg_reg_shift);
12742 %}
12743 
12744 instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
12745 %{
12746   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12747   ins_cost(1.9 * INSN_COST);
12748   format %{ "add  $dst, $src1, $src2, uxth #lshift" %}
12749 
12750    ins_encode %{
12751      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12752             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12753    %}
12754   ins_pipe(ialu_reg_reg_shift);
12755 %}
12756 
12757 instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
12758 %{
12759   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12760   ins_cost(1.9 * INSN_COST);
12761   format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}
12762 
12763    ins_encode %{
12764      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12765             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
12766    %}
12767   ins_pipe(ialu_reg_reg_shift);
12768 %}
12769 
12770 instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
12771 %{
12772   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12773   ins_cost(1.9 * INSN_COST);
12774   format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}
12775 
12776    ins_encode %{
12777      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12778             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12779    %}
12780   ins_pipe(ialu_reg_reg_shift);
12781 %}
12782 
12783 instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
12784 %{
12785   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12786   ins_cost(1.9 * INSN_COST);
12787   format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}
12788 
12789    ins_encode %{
12790      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12791             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12792    %}
12793   ins_pipe(ialu_reg_reg_shift);
12794 %}
12795 
12796 instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
12797 %{
12798   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12799   ins_cost(1.9 * INSN_COST);
12800   format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}
12801 
12802    ins_encode %{
12803      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12804             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
12805    %}
12806   ins_pipe(ialu_reg_reg_shift);
12807 %}
12808 
12809 instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12810 %{
12811   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12812   ins_cost(1.9 * INSN_COST);
12813   format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}
12814 
12815    ins_encode %{
12816      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12817             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12818    %}
12819   ins_pipe(ialu_reg_reg_shift);
12820 %}
12821 
12822 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12823 %{
12824   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12825   ins_cost(1.9 * INSN_COST);
12826   format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
12827 
12828    ins_encode %{
12829      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12830             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12831    %}
12832   ins_pipe(ialu_reg_reg_shift);
12833 %}
12834 
12835 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12836 %{
12837   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12838   ins_cost(1.9 * INSN_COST);
12839   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
12840 
12841    ins_encode %{
12842      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12843             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12844    %}
12845   ins_pipe(ialu_reg_reg_shift);
12846 %}
12847 
12848 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12849 %{
12850   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12851   ins_cost(1.9 * INSN_COST);
12852   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
12853 
12854    ins_encode %{
12855      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12856             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12857    %}
12858   ins_pipe(ialu_reg_reg_shift);
12859 %}
12860 // END This section of the file is automatically generated. Do not edit --------------
12861 
12862 // ============================================================================
12863 // Floating Point Arithmetic Instructions
12864 
// Float add: dst = src1 + src2 via single-precision FADDS.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12879 
// Double add: dst = src1 + src2 via double-precision FADDD.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12894 
// Float subtract: dst = src1 - src2 via single-precision FSUBS.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12909 
// Double subtract: dst = src1 - src2 via double-precision FSUBD.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12924 
// Float multiply: dst = src1 * src2 via single-precision FMULS.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12939 
// Double multiply: dst = src1 * src2 via double-precision FMULD.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12954 
12955 // src1 * src2 + src3
// Fused multiply-add (float): dst = src1 * src2 + src3 via FMADDS.
// Only selected when -XX:+UseFMA is enabled.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12971 
12972 // src1 * src2 + src3
// Fused multiply-add (double): dst = src1 * src2 + src3 via FMADDD.
// Only selected when -XX:+UseFMA is enabled.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12988 
12989 // -src1 * src2 + src3
// Fused multiply-subtract (float): dst = -src1 * src2 + src3 via FMSUBS.
// Two match rules cover negation on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13006 
13007 // -src1 * src2 + src3
// Fused multiply-subtract (double): dst = -src1 * src2 + src3 via FMSUBD.
// Two match rules cover negation on either multiplicand.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13024 
13025 // -src1 * src2 - src3
// Negated fused multiply-add (float): dst = -src1 * src2 - src3 via FNMADDS.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13042 
13043 // -src1 * src2 - src3
// Negated fused multiply-add (double): dst = -src1 * src2 - src3 via FNMADDD.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13060 
13061 // src1 * src2 - src3
// Negated fused multiply-subtract (float): dst = src1 * src2 - src3 via FNMSUBS.
// NOTE(review): the 'zero' operand is referenced by neither the match rule nor
// the encoding — presumably a leftover from an earlier match form; verify it
// can be removed.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13077 
13078 // src1 * src2 - src3
// Negated fused multiply-subtract (double): dst = src1 * src2 - src3.
// NOTE(review): the 'zero' operand is referenced by neither the match rule nor
// the encoding — presumably a leftover from an earlier match form; verify it
// can be removed.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13095 
13096 
13097 // Math.max(FF)F
// Float max via FMAXS (IEEE 754-2008 semantics for NaN/-0.0 handling
// come from the hardware instruction).
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13110 
13111 // Math.min(FF)F
// Float min via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13124 
13125 // Math.max(DD)D
// Double max via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13138 
13139 // Math.min(DD)D
// Double min via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13152 
13153 
// Float divide: dst = src1 / src2 via FDIVS. High cost reflects the
// long latency of hardware FP division.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13168 
// Double divide: dst = src1 / src2 via FDIVD. Higher cost than the
// single-precision divide, matching the longer hardware latency.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13183 
// Float negate: dst = -src via FNEGS.
// Fix: format string previously said "fneg", but the emitted instruction is
// fnegs (cf. negD_reg_reg, whose format correctly says "fnegd").
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13197 
// Double negate: dst = -src via FNEGD.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13211 
// Float absolute value: dst = |src| via FABSS.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13224 
// Double absolute value: dst = |src| via FABSD.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13237 
// Double square root via FSQRTD.
// Fix: this double-precision instruct was assigned the single-precision
// divide pipeline class (fp_div_s); use fp_div_d so the scheduler models
// the (longer) double-precision latency. Cosmetic to correctness, but it
// affects instruction scheduling.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13250 
// Float square root via FSQRTS. Matched from the ideal-graph pattern
// ConvD2F(SqrtD(ConvF2D(src))), which is how Java expresses a float sqrt.
// Fix: this single-precision instruct was assigned the double-precision
// divide pipeline class (fp_div_d); use fp_div_s (the classes were swapped
// with sqrtD_reg).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13263 
13264 // Math.rint, floor, ceil
// Math.rint / floor / ceil on double: selects FRINTN/FRINTM/FRINTP by the
// constant rounding-mode operand.
// Fix: the switch had no default; an unexpected rmode constant would
// silently emit no instruction. Fail fast with ShouldNotReachHere().
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        // round to nearest, ties to even
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        // round toward minus infinity
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        // round toward plus infinity
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
13286 
13287 // ============================================================================
13288 // Logical Instructions
13289 
13290 // Integer Logical Instructions
13291 
13292 // And Instructions
13293 
13294 
// Int bitwise AND, register-register, via ANDW (does not set flags).
// NOTE(review): the 'cr' operand is not referenced and there is no
// effect(KILL cr) — presumably a leftover; verify it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13309 
// Int bitwise AND with a logical-immediate operand, via ANDW (immediate).
// Fix: the format string previously said "andsw" (the flag-setting form),
// but the encoding emits the non-flag-setting andw; make the debug listing
// match the emitted instruction.
// NOTE(review): the 'cr' operand is not referenced and there is no
// effect(KILL cr) — presumably a leftover; verify it can be dropped.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13324 
13325 // Or Instructions
13326 
// Int bitwise OR, register-register, via ORRW.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13341 
// Int bitwise OR with a logical-immediate operand, via ORRW (immediate).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13356 
13357 // Xor Instructions
13358 
// Int bitwise XOR, register-register, via EORW.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13373 
// Int bitwise XOR with a logical-immediate operand, via EORW (immediate).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13388 
13389 // Long Logical Instructions
13390 // TODO
13391 
// Long bitwise AND, register-register, via 64-bit AND.
// Fix: format string annotation said "# int" (copy-paste from the int
// version); this is the long form — annotate "# long".
// NOTE(review): the 'cr' operand is not referenced and there is no
// effect(KILL cr) — presumably a leftover; verify it can be dropped.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13406 
// Long bitwise AND with a logical-immediate operand, via 64-bit AND.
// Fix: format annotation "# int" corrected to "# long" for the long form.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13421 
13422 // Or Instructions
13423 
// Long bitwise OR, register-register, via 64-bit ORR.
// Fix: format annotation "# int" corrected to "# long" for the long form.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13438 
// Long bitwise OR with a logical-immediate operand, via 64-bit ORR.
// Fix: format annotation "# int" corrected to "# long" for the long form.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13453 
13454 // Xor Instructions
13455 
// Long bitwise XOR, register-register, via 64-bit EOR.
// Fix: format annotation "# int" corrected to "# long" for the long form.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13470 
// Long bitwise XOR with a logical-immediate operand, via 64-bit EOR.
// Fixes: format annotation "# int" corrected to "# long"; format/ins_cost
// ordering normalized to match every sibling logical instruct
// (format first, then ins_cost). Neither change affects code generation.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13485 
// Sign-extend int to long: SBFM with immr=0, imms=31 is the canonical
// encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13497 
13498 // this pattern occurs in bigmath arithmetic
13499 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
13500 %{
13501   match(Set dst (AndL (ConvI2L src) mask));
13502 
13503   ins_cost(INSN_COST);
13504   format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
13505   ins_encode %{
13506     __ ubfm($dst$$Register, $src$$Register, 0, 31);
13507   %}
13508 
13509   ins_pipe(ialu_reg_shift);
13510 %}
13511 
13512 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
13513   match(Set dst (ConvL2I src));
13514 
13515   ins_cost(INSN_COST);
13516   format %{ "movw  $dst, $src \t// l2i" %}
13517 
13518   ins_encode %{
13519     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
13520   %}
13521 
13522   ins_pipe(ialu_reg);
13523 %}
13524 
// Convert int to boolean: dst = (src != 0) ? 1 : 0. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Convert pointer to boolean: dst = (src != null) ? 1 : 0. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13560 
// FP <-> FP and integer <-> FP conversions. Each maps to the single
// dedicated AArch64 conversion instruction, costed at 5 * INSN_COST.

// double -> float narrowing conversion
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, truncating (fcvtzs* = convert to signed, round toward zero)
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, truncating
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float (scvtf* = signed convert to FP)
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, truncating
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, truncating
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13690 
13691 // stack <-> reg and reg <-> reg shuffles with no conversion
13692 
// Reinterpret a stack-resident float's bits as an int (no conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a stack-resident int's bits as a float (no conversion).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a stack-resident double's bits as a long (no conversion).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a stack-resident long's bits as a double (no conversion).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a float register's raw bits to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register's raw bits to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13800 
// Store a double register's raw bits to a long stack slot (no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order fixed to src, dst — matching the emitted store and the
  // sibling Move*_reg_stack formats; the old text printed them swapped.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13818 
// Store a long register's raw bits to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Direct register-to-register bit move: float bits -> int register (fmov).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Direct register-to-register bit move: int bits -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Direct register-to-register bit move: double bits -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Direct register-to-register bit move: long bits -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13908 
13909 // ============================================================================
13910 // clearing of an array
13911 
// Zero cnt words starting at base; both fixed registers are clobbered by
// the zero_words stub protocol (r10 = base, r11 = count).
// NOTE(review): cr is declared but carries no KILL effect here — confirm
// whether zero_words can clobber the flags on this path.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-length variant: only selected when the word count is small
// enough (below BlockZeroingLowLimit in words) to inline without the
// block-zeroing (DC ZVA) path.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13943 
13944 // ============================================================================
13945 // Overflow Math Instructions
13946 
// Overflow checks for add/sub: perform the arithmetic with a discarded
// result so only the flags are produced; a following branch/cmov tests
// VS (overflow). cmn/cmp are the flag-setting adds/subs aliases with a
// zero-register destination.

instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // subs with a zr destination is exactly cmp, spelled explicitly here.
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14050 
// Negation overflow check: negate is matched as (Sub 0 op), so compare
// zr against op; only INT_MIN / LONG_MIN overflow on negation.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14076 
// Multiply overflow check, int: compute the full 64-bit product with smull,
// then compare it against its own 32-bit sign-extension — they differ iff
// the product does not fit in 32 bits. The cselw/cmpw tail converts that
// NE/EQ outcome into the V flag, which is what the matcher's cmpOp expects.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: when the overflow check feeds a branch directly, skip the
// flag-conversion tail and branch on NE/EQ instead of VS/VC.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Multiply overflow check, long: the 128-bit product's high half (smulh)
// must equal the sign-extension of the low half (mul) for the result to
// fit in 64 bits. Tail converts NE/EQ to the V flag as above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused branch form of the long multiply overflow check.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14166 
14167 // ============================================================================
14168 // Compare Instructions
14169 
// Signed int compares. Immediate forms are split by encodability:
// add/sub-style immediates cost one instruction, arbitrary immediates two
// (materialize then compare).

instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14225 
14226 // Unsigned compare Instructions; really, same as signed compare
14227 // except it should only be used to feed an If or a CMovI which takes a
14228 // cmpOpU.
14229 
// Unsigned int compares: the same cmpw encodings as the signed forms —
// only the condition codes consumed downstream differ — but they produce
// rFlagsRegU so the matcher pairs them with cmpOpU users.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14285 
// Signed long compares; same immediate-encodability split as the int forms.

instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14341 
// Unsigned long compares: same encodings as CmpL but produce rFlagsRegU
// for use with unsigned condition codes.

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14397 
// Pointer and compressed-pointer compares; pointer comparison is unsigned,
// hence rFlagsRegU. The test* forms compare against the null constant.

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null check of a pointer register.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Null check of a compressed pointer register.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14453 
14454 // FP comparisons
14455 //
14456 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14457 // using normal cmpOp. See declaration of rFlagsReg for details.
14458 
// Single-precision FP compare; the zero form uses fcmp's dedicated
// compare-with-#0.0 encoding and needs no zero register.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision (CmpD) variants of the FP compares follow.
14487 
// Double-precision FP compare; the zero form uses fcmp's dedicated
// compare-with-#0.0 encoding and needs no zero register.

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14515 
// Three-way float compare (CmpF3): dst = -1 / 0 / +1 for less-or-unordered /
// equal / greater, via csinv then csneg on the fcmps flags.
// Cleanup: removed a dead `Label done; ... bind(done);` pair that no branch
// ever targeted (bind emits no code), and balanced the format-text parens.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14543 
// Three-way double compare (CmpD3): dst = -1 / 0 / +1 for less-or-unordered /
// equal / greater, via csinv then csneg on the fcmpd flags.
// Cleanup: removed a dead `Label done; ... bind(done);` pair that no branch
// ever targeted, and balanced the format-text parens.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14570 
// Three-way float compare against 0.0, using fcmp's immediate-zero form.
// Cleanup: removed a dead `Label done; ... bind(done);` pair that no branch
// ever targeted, and balanced the format-text parens.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14597 
// Three-way double compare against 0.0, using fcmp's immediate-zero form.
// Cleanup: removed a dead `Label done; ... bind(done);` pair that no branch
// ever targeted, and balanced the format-text parens.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14623 
// CmpLTMask: dst = -1 (all bits set) if p < q (signed), else 0.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if LT else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate: 0 - 1 = -1 (all ones); 0 - 0 = 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14644 
// CmpLTMask against zero: src < 0 iff the sign bit is set, so the mask
// is just the sign bit replicated — a single arithmetic shift right by 31.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14660 
14661 // ============================================================================
14662 // Max and Min
14663 
// Expand-only helper (no match rule): conditional select on LT.
// Used by minI_rReg below after a compI_reg_reg sets the flags.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}
14680 
// MinI: expands to a compare followed by a conditional select on LT,
// i.e. dst = (src1 < src2) ? src1 : src2.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
14693 // FROM HERE
14694 
// Expand-only helper (no match rule): conditional select on GT.
// Used by maxI_rReg below after a compI_reg_reg sets the flags.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}
14711 
// MaxI: expands to a compare followed by a conditional select on GT,
// i.e. dst = (src1 > src2) ? src1 : src2.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
14722 
14723 // ============================================================================
14724 // Branch Instructions
14725 
14726 // Direct Branch.
// Direct Branch.
// Unconditional branch (Goto) to a label via the b instruction.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14740 
14741 // Conditional Near Branch
// Conditional Near Branch
// Signed conditional branch on the flags register (b.<cond>).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14761 
14762 // Conditional Near Branch Unsigned
// Conditional Near Branch Unsigned
// Same as branchCon but for unsigned comparisons (cmpOpU / rFlagsRegU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14782 
14783 // Make use of CBZ and CBNZ.  These instructions, as well as being
14784 // shorter than (cmp; branch), have the additional benefit of not
14785 // killing the flags.
14786 
// Compare int against zero and branch EQ/NE in one instruction
// (cbzw/cbnzw); the flags are left untouched.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14803 
// Compare long against zero and branch EQ/NE in one instruction
// (cbz/cbnz, 64-bit form); the flags are left untouched.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14820 
// Compare pointer against NULL and branch EQ/NE in one instruction
// (cbz/cbnz, 64-bit form); the flags are left untouched.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14837 
// Compare narrow (compressed) oop against zero and branch EQ/NE using
// the 32-bit cbzw/cbnzw; the flags are left untouched.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14854 
// Null check of a decoded narrow oop: a DecodeN of zero is zero, so the
// test can be done on the narrow form with cbzw/cbnzw, skipping the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14871 
// Unsigned int compare against zero and branch.  For an unsigned compare
// with 0, "below" (LO) is impossible, so LS is equivalent to EQ and HI to
// NE — cbzw/cbnzw therefore also covers the lt/ge forms of the condition.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14888 
// Unsigned long compare against zero and branch; same LS==EQ / HI==NE
// reasoning as cmpUI_imm0_branch above, using the 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14905 
14906 // Test bit and Branch
14907 
14908 // Patterns for short (< 32KiB) variants
// Sign test of a long via tbz/tbnz on bit 63: op1 < 0 iff the sign bit is
// set, so LT maps to "bit set" (NE) and GE to "bit clear" (EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14924 
// Sign test of an int via tbz/tbnz on bit 31; see cmpL_branch_sign for
// the LT->NE / GE->EQ mapping.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14940 
// Single-bit test of a long: (op1 & power-of-2-mask) ==/!= 0 becomes a
// tbz/tbnz on the bit selected by the mask (predicate guarantees one bit).
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14957 
// Single-bit test of an int: (op1 & power-of-2-mask) ==/!= 0 becomes a
// tbz/tbnz on the bit selected by the mask (predicate guarantees one bit).
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14974 
14975 // And far variants
// And far variants
// Far variant of cmpL_branch_sign: same bit-63 test, but tbr is told the
// target may be out of tbz/tbnz range (far=true).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14990 
// Far variant of cmpI_branch_sign: same bit-31 test with far=true.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15005 
// Far variant of cmpL_branch_bit: single-bit test with far=true.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15021 
// Far variant of cmpI_branch_bit: single-bit test with far=true.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15037 
15038 // Test bits
15039 
// Set flags from (op1 & imm) compared with 0 using a 64-bit tst; the
// predicate restricts the mask to a valid AArch64 logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15052 
// Set flags from (op1 & imm) compared with 0 using a 32-bit tstw; the
// predicate restricts the mask to a valid AArch64 logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Format fixed to show "tstw": the encoding emits the 32-bit tstw,
  // matching the cmpI_and_reg format below (previously printed "tst").
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15065 
// Set flags from (op1 & op2) compared with 0, register form, 64-bit tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15076 
// Set flags from (op1 & op2) compared with 0, register form, 32-bit tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15087 
15088 
15089 // Conditional Far Branch
15090 // Conditional Far Branch Unsigned
15091 // TODO: fixme
15092 
15093 // counted loop end branch near
// counted loop end branch near
// Back-branch of a counted loop: same encoding as branchCon, but matches
// the CountedLoopEnd node.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15109 
15110 // counted loop end branch near Unsigned
// counted loop end branch near Unsigned
// Unsigned-condition variant of branchLoopEnd.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15126 
15127 // counted loop end branch far
15128 // counted loop end branch far unsigned
15129 // TODO: fixme
15130 
15131 // ============================================================================
15132 // inlined locking and unlocking
15133 
// Inlined fast-path monitor enter: sets the flags to report success or
// failure of the fast lock attempt (FastLock); tmp/tmp2 are scratch.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15148 
// Inlined fast-path monitor exit (FastUnlock); tmp/tmp2 are scratch.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15161 
15162 
15163 // ============================================================================
15164 // Safepoint Instructions
15165 
15166 // TODO
15167 // provide a near and far version of this code
15168 
// Safepoint poll: load from the polling page; the load faults when the VM
// arms the page, diverting the thread into the safepoint handler.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15182 
15183 
15184 // ============================================================================
15185 // Procedure Call/Return Instructions
15186 
15187 // Call Java Static Instruction
15188 
// Statically-bound Java call (CallStaticJava) followed by the standard
// call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15204 
15205 // TO HERE
15206 
15207 // Call Java Dynamic Instruction
// Call Java Dynamic Instruction
// Dynamically-dispatched (inline-cache) Java call followed by the
// standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15223 
15224 // Call Runtime Instruction
15225 
// Call from compiled Java code into the VM runtime (CallRuntime).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15240 
15241 // Call Runtime Instruction
15242 
// Leaf runtime call (CallLeaf): no safepoint, same encoding as a plain
// runtime call here.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15257 
15258 // Call Runtime Instruction
15259 
// Leaf runtime call that uses no FP arguments/results (CallLeafNoFP);
// encoded identically to CallLeafDirect on this port.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15274 
15275 // Tail Call; Jump from runtime stub to Java code.
15276 // Also known as an 'interprocedural jump'.
15277 // Target of jump will eventually return to caller.
15278 // TailJump below removes the return address.
// Indirect tail call: jump (not call) through jump_target with the method
// oop held in the inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15291 
// Indirect tail jump used for exception dispatch: the exception oop is
// pinned in r0 (iRegP_R0) while jumping to the handler.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15304 
15305 // Create exception oop: created by stack-crawling runtime code.
15306 // Created exception is now available to this handler, and is setup
15307 // just prior to jumping to this handler. No code emitted.
15308 // TODO check
15309 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the CreateEx node to r0, where the runtime has already placed the
// exception oop: size(0) — no instructions are emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15322 
15323 // Rethrow exception: The exception oop will come in the first
15324 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15335 
15336 
15337 // Return Instruction
15338 // epilog node loads ret address into lr as part of frame pop
// Method return: the epilog has already restored lr, so this is a bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15349 
15350 // Die now.
// Die now.
// Halt node: emit a trapping instruction so execution can never fall
// through; reaching it indicates a compiler bug.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15365 
15366 // ============================================================================
15367 // Partial Subtype Check
15368 //
// Checks whether the sub-klass appears in the secondary
// superklass array for an instance of the superklass.  Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.
15373 
// Partial subtype check with fixed register assignments (r4 sub, r0 super,
// r2 temp, r5 result); opcode 0x1 asks the stub to zero result on a hit.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15388 
// Variant matched when only the flags of the check are consumed
// (result compared against zero); result/temp are clobbered, and opcode
// 0x0 tells the stub it need not zero result on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15403 
// String compare intrinsic, both strings UTF-16 (UU encoding).
// NOTE(review): the format's KILL list shows only $tmp1, but the effect
// also kills tmp2 and cr — consider updating the format text.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15421 
// String compare intrinsic, both strings Latin-1 (LL encoding).
// No vector temps are needed for this encoding (fnoreg passed).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15438 
// String compare intrinsic, str1 UTF-16 vs str2 Latin-1 (UL encoding);
// uses three vector temps (v0-v2) for the widening comparison loop.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15458 
// String compare intrinsic, str1 Latin-1 vs str2 UTF-16 (LU encoding);
// mirror of string_compareUL above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15478 
// String.indexOf intrinsic, both strings UTF-16 (UU); the -1 argument
// marks the substring length as non-constant (taken from cnt2 at runtime).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15499 
// String.indexOf intrinsic, both strings Latin-1 (LL); -1 marks the
// substring length as non-constant.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15520 
// String.indexOf intrinsic, UTF-16 haystack with Latin-1 needle (UL);
// -1 marks the substring length as non-constant.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15541 
// String.indexOf intrinsic, UU encoding, with a small constant needle
// length (immI_le_4). Because cnt2 is a compile-time constant it is passed
// as the immediate icnt2 and zr is supplied for the unused cnt2 register
// and the two unused temps.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Same as above for the LL encoding; needle length is a constant <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-count UL variant. Note the tighter immediate class: only a
// needle length of exactly 1 (immI_1) is handled here, unlike the UU/LL
// forms which accept lengths up to 4 (immI_le_4).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15604 
// StringUTF16.indexOf(char) intrinsic: search a UTF-16 char array for a
// single code unit. No encoding predicate is needed since StrIndexOfChar
// has only this one form here. Inputs are fixed-register and clobbered;
// three integer temps and the flags are killed.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15622 
// String.equals intrinsic, LL encoding. The trailing literal argument to
// string_equals is the element size in bytes: 1 here (Latin-1 bytes),
// 2 in the UU variant below.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.equals intrinsic, UU encoding (16-bit elements).
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15654 
// Arrays.equals intrinsic for byte[] (LL encoding). The final literal
// argument to arrays_equals is the element size in bytes (1 for bytes,
// 2 for chars in the C variant below). tmp (R10) is an extra scratch
// register that is KILLed rather than declared TEMP.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}

// Arrays.equals intrinsic for char[] (UU encoding; 16-bit elements).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15688 
// StringCoding.hasNegatives intrinsic: scan a byte[] for any byte with the
// sign bit set (i.e. any non-ASCII byte). ary1 and len are consumed and
// clobbered; the result lands in R0.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15699 
// fast char[] to byte[] compression
// StrCompressedCopy intrinsic: narrow UTF-16 chars in src to bytes in dst.
// Uses four SIMD double-register temps (V0-V3); src/dst/len are consumed.
// NOTE(review): the format text says "KILL R1, R2, R3, R4" but no R4
// operand appears in this instruct (src=R2, dst=R1, len=R3, result=R0) —
// debug-only text, confirm against the surrounding file's convention.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15718 
// fast byte[] to char[] inflation
// StrInflatedCopy intrinsic: widen bytes in src to UTF-16 chars in dst.
// src/dst/len are fixed-register inputs clobbered by the stub; three SIMD
// temps (V0-V2) and one integer temp (R3) are used, and flags are killed.
// The node produces no value, hence the Universe dummy result operand.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // Fix: the debug format previously listed only $tmp1 and $tmp2, although
  // $tmp3 and $tmp4 are also TEMPs per the effect list above.
  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15733 
// encode char[] to byte[] in ISO_8859_1
// EncodeISOArray intrinsic. Unlike string_compress above, the four SIMD
// scratch registers are declared KILL (not TEMP). result receives the
// number of characters processed (as returned by the stub into R0).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15752 
15753 // ============================================================================
15754 // This name is KNOWN by the ADLC and cannot be changed.
15755 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15756 // for this guy.
15757 instruct tlsLoadP(thread_RegP dst)
15758 %{
15759   match(Set dst (ThreadLocal));
15760 
15761   ins_cost(0);
15762 
15763   format %{ " -- \t// $dst=Thread::current(), empty" %}
15764 
15765   size(0);
15766 
15767   ins_encode( /*empty*/ );
15768 
15769   ins_pipe(pipe_class_empty);
15770 %}
15771 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores are selected on the LoadVector/StoreVector memory
// size: 4 bytes -> ldrs/strs, 8 bytes -> ldrd/strd, 16 bytes -> ldrq/strq.
// vecD operands carry the 32/64-bit payloads, vecX the 128-bit ones; the
// vmem4/8/16 operand classes constrain the addressing mode per access size.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15839 
// Replicate (splat) a scalar byte into every lane of a 64-bit vector.
// Also matches length-4 vectors: a 4-byte vector lives in the low half of
// a D register and the extra duplicated lanes are harmless.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Splat a byte into all 16 lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Splat an immediate byte (constant masked to 8 bits) via movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 128-bit variant of the immediate byte splat.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat a scalar short into 4 (or 2; see replicate8B note) 16-bit lanes.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Splat a scalar short into all 8 lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Splat an immediate short (constant masked to 16 bits) via movi.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 128-bit variant of the immediate short splat.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15939 
// Splat a scalar int into 2 lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Splat a scalar int into all 4 lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Splat an immediate int via movi (immI, no masking needed for 32-bit lanes).
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 128-bit variant of the immediate int splat.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat a scalar long into both 64-bit lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero a 128-bit vector by xoring it with itself (eor T16B), which avoids
// materializing the constant. NOTE(review): despite the 2L name this
// matches ReplicateI zero, not ReplicateL — presumably the ideal graph
// canonicalizes an all-zero long vector that way; confirm before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat a scalar float (already in a FP register) into 2 lanes.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Splat a scalar float into all 4 lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Splat a scalar double into both 64-bit lanes.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16052 
// ====================REDUCTION ARITHMETIC====================================

// Add-reduce a 2-lane int vector into a scalar: extract both lanes with
// umov, then fold them and the scalar src1 together with two addw's.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $tmp, $src1, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($tmp$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-lane int vector: addv sums all lanes in the SIMD unit,
// then one umov + addw folds in the scalar src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2-lane int vector: extract each lane and multiply into
// dst on the integer side (no SIMD horizontal multiply exists).
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 4-lane int vector: copy the high 64 bits onto the low
// half (ins D), multiply pairwise with mulv T2S so only two products
// remain, then extract both and finish on the integer side.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16135 
// Add-reduce a 2-lane float vector with a scalar. Lanes are accumulated
// one at a time (fadds, then ins to shift lane 1 down, then fadds) rather
// than with a horizontal add — NOTE(review): presumably to keep the strict
// left-to-right FP evaluation order; confirm before replacing with faddp.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4-lane float add reduction: same sequential lane-by-lane scheme, moving
// lanes 1..3 down to lane 0 with ins before each fadds.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2-lane float multiply reduction; mirrors reduce_add2F with fmuls.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4-lane float multiply reduction; mirrors reduce_add4F with fmuls.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16239 
// 2-lane double add reduction: faddd scalar+lane0, move lane 1 down with
// ins D, faddd again. Same sequential scheme as the float reductions.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2-lane double multiply reduction; mirrors reduce_add2D with fmuld.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16279 
// 2-lane float max reduction: fmaxs scalar vs lane 0, shift lane 1 down
// with ins, fmaxs again. Element type is checked via the node's vector
// basic type since MaxReductionV is shared across F and D.
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4-lane float max reduction: fmaxv reduces all lanes horizontally in one
// instruction (max is order-insensitive, unlike the add/mul reductions
// above), then fmaxs folds in the scalar.
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2-lane double max reduction; same shape as reduce_max2F with D lanes.
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16325 
16326 instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
16327   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
16328   match(Set dst (MinReductionV src1 src2));
16329   ins_cost(INSN_COST);
16330   effect(TEMP_DEF dst, TEMP tmp);
16331   format %{ "fmins $dst, $src1, $src2\n\t"
16332             "ins   $tmp, S, $src2, 0, 1\n\t"
16333             "fmins $dst, $dst, $tmp\t# min reduction2F" %}
16334   ins_encode %{
16335     __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
16336     __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
16337     __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
16338   %}
16339   ins_pipe(pipe_class_default);
16340 %}
16341 
16342 instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
16343   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
16344   match(Set dst (MinReductionV src1 src2));
16345   ins_cost(INSN_COST);
16346   effect(TEMP_DEF dst);
16347   format %{ "fminv $dst, T4S, $src2\n\t"
16348             "fmins $dst, $dst, $src1\t# min reduction4F" %}
16349   ins_encode %{
16350     __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
16351     __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
16352   %}
16353   ins_pipe(pipe_class_default);
16354 %}
16355 
16356 instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
16357   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
16358   match(Set dst (MinReductionV src1 src2));
16359   ins_cost(INSN_COST);
16360   effect(TEMP_DEF dst, TEMP tmp);
16361   format %{ "fmind $dst, $src1, $src2\n\t"
16362             "ins   $tmp, D, $src2, 0, 1\n\t"
16363             "fmind $dst, $dst, $tmp\t# min reduction2D" %}
16364   ins_encode %{
16365     __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
16366     __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
16367     __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
16368   %}
16369   ins_pipe(pipe_class_default);
16370 %}
16371 
16372 // ====================VECTOR ARITHMETIC=======================================
16373 
16374 // --------------------------------- ADD --------------------------------------
16375 
// Vector add, byte lanes in a D register. The predicate also accepts
// length-4 vectors; the extra lanes of the T8B add are simply unused.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 16 byte lanes (full Q register).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, halfword lanes. Predicate also accepts length 2.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 8 halfword lanes (Q register).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 int lanes (D register).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 4 int lanes (Q register).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 long lanes (Q register, T2D arrangement).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector float add, 2 float lanes (D register).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector float add, 4 float lanes (Q register).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16503 
// Vector double add, 2 double lanes (Q register).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Guard on vector length, consistent with the other 2D rules
  // (vsub2D, vmul2D, vdiv2D, vsqrt2D, vabs2D, vneg2D).
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16516 
16517 // --------------------------------- SUB --------------------------------------
16518 
// Vector subtract, byte lanes in a D register (also matches length 4).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 16 byte lanes (Q register).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, halfword lanes (also matches length 2).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 8 halfword lanes (Q register).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 int lanes (D register).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 4 int lanes (Q register).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 long lanes (Q register, T2D arrangement).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector float subtract, 2 float lanes (D register).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector float subtract, 4 float lanes (Q register).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector double subtract, 2 double lanes (Q register).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16660 
16661 // --------------------------------- MUL --------------------------------------
16662 
// Vector multiply, halfword lanes (also matches length 2).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 8 halfword lanes (Q register).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 int lanes (D register).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 4 int lanes (Q register).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector float multiply, 2 float lanes (D register).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector float multiply, 4 float lanes (Q register).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector double multiply, 2 double lanes (Q register).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16761 
16762 // --------------------------------- MLA --------------------------------------
16763 
// Integer multiply-accumulate, dst += src1 * src2, halfword lanes
// (also matches length 2). dst is both read and written (mla semantics).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 8 halfword lanes (Q register).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, 2 int lanes (D register).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 4 int lanes (Q register).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2 -- fused multiply-add (FmaVF), guarded by UseFMA.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2 -- fused multiply-add, 4 float lanes.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2 -- fused multiply-add, 2 double lanes.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16862 
16863 // --------------------------------- MLS --------------------------------------
16864 
// Integer multiply-subtract, dst -= src1 * src2, halfword lanes
// (also matches length 2). dst is both read and written (mls semantics).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 8 halfword lanes (Q register).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, 2 int lanes (D register).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 4 int lanes (Q register).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2 -- fused multiply-subtract (fmls), guarded by UseFMA.
// Both placements of the negation are matched; the products are equal.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2 -- fused multiply-subtract, 4 float lanes.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2 -- fused multiply-subtract, 2 double lanes.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16966 
16967 // --------------- Vector Multiply-Add Shorts into Integer --------------------
16968 
// Widening multiply of adjacent short pairs, then pairwise add into ints:
// tmp gets the widened products of the lower halves (smull), dst the upper
// halves (smull2 via the T8H form -- presumably; confirm against the
// assembler), then addp folds adjacent pairs into the 4S result.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  // dst is written before the inputs are fully consumed; tmp is scratch.
  effect(TEMP_DEF dst, TEMP tmp);
  // Dropped the stray trailing "\n\t" after the last mnemonic: it printed a
  // dangling blank continuation line in disassembly output.
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16990 
16991 // --------------------------------- DIV --------------------------------------
16992 
// Vector float divide, 2 float lanes (D register).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector float divide, 4 float lanes (Q register).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector double divide, 2 double lanes (Q register).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17034 
17035 // --------------------------------- SQRT -------------------------------------
17036 
// Vector square root, 2 double lanes (Q register).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17048 
17049 // --------------------------------- ABS --------------------------------------
17050 
// Vector absolute value, 2 float lanes (D register).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector absolute value, 4 float lanes (Q register).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Vector absolute value, 2 double lanes (Q register).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17089 
17090 // --------------------------------- NEG --------------------------------------
17091 
// Vector negate, 2 float lanes (D register).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector negate, 4 float lanes (Q register).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Vector negate, 2 double lanes (Q register).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17130 
17131 // --------------------------------- AND --------------------------------------
17132 
// Bitwise AND. Predicated on length_in_bytes (not element count) because
// the operation is type-agnostic; the 4-byte case also uses the T8B form.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND, full 16-byte (Q) register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17161 
17162 // --------------------------------- OR ---------------------------------------
17163 
// Bitwise OR, 8-byte (D) register; also covers 4-byte vectors.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Format text fixed from "and" to "orr": the encoding emits orr, and the
  // old text (a copy/paste from vand8B) misreported it in disassembly.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17178 
// Bitwise OR, full 16-byte (Q) register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17192 
17193 // --------------------------------- XOR --------------------------------------
17194 
// vxor8B: per-lane bitwise XOR of two 64-bit vectors (NEON eor); the
// predicate also covers 4-byte vectors.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17209 
// vxor16B: per-lane bitwise XOR of two 128-bit vectors (NEON eor, 16 byte lanes).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17223 
17224 // ------------------------------ Shift ---------------------------------------
// vshiftcnt8B: broadcast a scalar shift count into every byte lane of a
// 64-bit vector (dup). Serves both left- and right-shift count nodes.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
17235 
// vshiftcnt16B: 128-bit variant of vshiftcnt8B — broadcast a scalar shift
// count into all 16 byte lanes.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17246 
// vsll8B: variable left shift of byte lanes by a per-lane count vector (sshl).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17260 
// vsll16B: 128-bit variant of vsll8B — variable left shift of 16 byte lanes.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17273 
17274 // Right shifts with vector shift count on aarch64 SIMD are implemented
17275 // as left shift by negative shift count.
17276 // There are two cases for vector shift count.
17277 //
17278 // Case 1: The vector shift count is from replication.
17279 //        |            |
17280 //    LoadVector  RShiftCntV
17281 //        |       /
17282 //     RShiftVI
17283 // Note: In inner loop, multiple neg instructions are used, which can be
17284 // moved to outer loop and merge into one neg instruction.
17285 //
17286 // Case 2: The vector shift count is from loading.
// This case isn't supported by the middle-end yet, but it is supported by
// panama/vectorIntrinsics (JEP 338: Vector API).
17289 //        |            |
17290 //    LoadVector  LoadVector
17291 //        |       /
17292 //     RShiftVI
17293 //
17294 
// vsra8B: arithmetic right shift of byte lanes by a variable count vector.
// Per the block comment above: AArch64 SIMD implements right shift as a left
// shift (sshl) by the negated count, hence the negr into tmp first.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17312 
// vsra16B: 128-bit variant of vsra8B — arithmetic right shift via
// negated count + sshl.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17329 
// vsrl8B: logical (unsigned) right shift of byte lanes by a variable count —
// implemented as ushl by the negated count (see section comment above).
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17347 
// vsrl16B: 128-bit variant of vsrl8B — logical right shift via negated
// count + ushl.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17364 
// vsll8B_imm: left shift of byte lanes by an immediate count.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Counts >= 8 (element width) produce all zeros; eor src with itself
    // zeroes dst instead of emitting an out-of-range shl.
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17384 
// vsll16B_imm: 128-bit variant of vsll8B_imm. Counts >= 8 zero the result.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17403 
// vsra8B_imm: arithmetic right shift of byte lanes by an immediate.
// Counts >= 8 are clamped to 7, the maximum meaningful byte shift.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17418 
// vsra16B_imm: 128-bit variant of vsra8B_imm (count clamped to 7).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17432 
// vsrl8B_imm: logical right shift of byte lanes by an immediate.
// Counts >= 8 produce all zeros (eor of src with itself).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17452 
// vsrl16B_imm: 128-bit variant of vsrl8B_imm. Counts >= 8 zero the result.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17471 
// vsll4S: variable left shift of 16-bit lanes by a per-lane count vector.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17485 
// vsll8S: 128-bit variant of vsll4S — variable left shift of 8 x 16-bit lanes.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17498 
// vsra4S: arithmetic right shift of 16-bit lanes by a variable count vector,
// as sshl by the negated count.
// NOTE(review): the count is negated bytewise (T8B) while the shift uses 4H
// lanes — assumes sshl only consumes the low byte of each lane's count, as in
// the other variable-shift rules; confirm against the sshl specification.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17516 
// vsra8S: 128-bit variant of vsra4S — arithmetic right shift via negated
// count (bytewise T16B negr) + sshl on 8H lanes.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17533 
// vsrl4S: logical right shift of 16-bit lanes by a variable count vector,
// as ushl by the negated (bytewise) count.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17551 
// vsrl8S: 128-bit variant of vsrl4S — logical right shift via negated
// count + ushl on 8H lanes.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17568 
// vsll4S_imm: left shift of 16-bit lanes by an immediate.
// Counts >= 16 (element width) produce all zeros via eor of src with itself.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17588 
// vsll8S_imm: 128-bit variant of vsll4S_imm. Counts >= 16 zero the result.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17607 
// vsra4S_imm: arithmetic right shift of 16-bit lanes by an immediate.
// Counts >= 16 are clamped to 15, the maximum meaningful half-word shift.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  // Fixed: immediate right-shift counts are RShiftCntV nodes (was LShiftCntV,
  // which never pairs with RShiftVS; consistent with vsra8B_imm/vsra2I_imm).
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17622 
// vsra8S_imm: 128-bit variant of vsra4S_imm (count clamped to 15).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  // Fixed: immediate right-shift counts are RShiftCntV nodes (was LShiftCntV,
  // which never pairs with RShiftVS; consistent with the other vsra*_imm rules).
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17636 
// vsrl4S_imm: logical right shift of 16-bit lanes by an immediate.
// Counts >= 16 produce all zeros (eor of src with itself).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17656 
// vsrl8S_imm: 128-bit variant of vsrl4S_imm. Counts >= 16 zero the result.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17675 
// vsll2I: variable left shift of 2 x 32-bit lanes by a per-lane count vector.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17688 
// vsll4I: 128-bit variant of vsll2I — variable left shift of 4 x 32-bit lanes.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17701 
// vsra2I: arithmetic right shift of 2 x 32-bit lanes by a variable count,
// as sshl by the negated (bytewise) count.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17718 
// vsra4I: 128-bit variant of vsra2I — arithmetic right shift via negated
// count + sshl on 4S lanes.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17735 
// vsrl2I: logical right shift of 2 x 32-bit lanes by a variable count,
// as ushl by the negated (bytewise) count.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17752 
// vsrl4I: 128-bit variant of vsrl2I — logical right shift via negated
// count + ushl on 4S lanes.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17769 
// vsll2I_imm: left shift of 2 x 32-bit lanes by an immediate. No out-of-range
// clamp here — presumably int shift counts are already masked to 0..31 by the
// middle-end, unlike the subword (B/H) rules above; confirm if modifying.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17782 
// vsll4I_imm: 128-bit variant of vsll2I_imm.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17795 
// vsra2I_imm: arithmetic right shift of 2 x 32-bit lanes by an immediate.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17808 
// vsra4I_imm: 128-bit variant of vsra2I_imm.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17821 
// vsrl2I_imm: logical right shift of 2 x 32-bit lanes by an immediate.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17834 
// vsrl4I_imm: 128-bit variant of vsrl2I_imm.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17847 
// vsll2L: variable left shift of 2 x 64-bit lanes by a per-lane count vector.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17860 
// vsra2L: arithmetic right shift of 2 x 64-bit lanes by a variable count,
// as sshl by the negated (bytewise) count.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17877 
// vsrl2L: logical right shift of 2 x 64-bit lanes by a variable count,
// as ushl by the negated (bytewise) count.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17894 
// vsll2L_imm: left shift of 2 x 64-bit lanes by an immediate.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17907 
// vsra2L_imm: arithmetic right shift of 2 x 64-bit lanes by an immediate.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17920 
// vsrl2L_imm: logical right shift of 2 x 64-bit lanes by an immediate.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17933 
// vmax2F: per-lane float maximum of 2 x 32-bit float lanes (NEON fmax).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17947 
// vmax4F: per-lane float maximum of 4 x 32-bit float lanes.
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17961 
// vmax2D: per-lane double maximum of 2 x 64-bit double lanes.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17975 
// vmin2F: per-lane float minimum of 2 x 32-bit float lanes (NEON fmin).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17989 
// vmin4F: per-lane float minimum of 4 x 32-bit float lanes.
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18003 
// vmin2D: per-lane double minimum of 2 x 64-bit double lanes.
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18017 
// vround2D_reg: round 2 x double lanes per the constant rounding mode:
// rint -> frintn (to nearest, ties to even), floor -> frintm (toward -inf),
// ceil -> frintp (toward +inf).
// NOTE(review): no default case — assumes rmode can only take the three
// RoundDoubleModeNode values; any other constant would silently emit nothing.
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(vdop_fp128);
%}
18040 
// vpopcount4I: population count per 32-bit lane. cnt gives per-byte popcounts;
// two pairwise widening adds (uaddlp) accumulate them: bytes -> half-words ->
// words, leaving one count per int lane.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18059 
// Population count of each lane of a 2 x int (64-bit) vector (PopCountVI).
// Same scheme as the 128-bit variant above, on the 64-bit (.8B/.4H)
// arrangements: byte-wise CNT, then two pairwise widening adds to reach
// 2 int lanes.
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
     // Per-byte popcount of the source.
     __ cnt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
     // Widen byte counts to halfword counts.
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
               as_FloatRegister($dst$$reg));
     // Widen halfword counts to the final per-int counts.
     __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18078 
18079 //----------PEEPHOLE RULES-----------------------------------------------------
18080 // These must follow all instruction definitions as they use the names
18081 // defined in the instructions definitions.
18082 //
18083 // peepmatch ( root_instr_name [preceding_instruction]* );
18084 //
18085 // peepconstraint %{
18086 // (instruction_number.operand_name relational_op instruction_number.operand_name
18087 //  [, ...] );
18088 // // instruction numbers are zero-based using left to right order in peepmatch
18089 //
18090 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18091 // // provide an instruction_number.operand_name for each operand that appears
18092 // // in the replacement instruction's match rule
18093 //
18094 // ---------VM FLAGS---------------------------------------------------------
18095 //
18096 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18097 //
18098 // Each peephole rule is given an identifying number starting with zero and
18099 // increasing by one in the order seen by the parser.  An individual peephole
18100 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18101 // on the command-line.
18102 //
18103 // ---------CURRENT LIMITATIONS----------------------------------------------
18104 //
18105 // Only match adjacent instructions in same basic block
18106 // Only equality constraints
18107 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18108 // Only one replacement instruction
18109 //
18110 // ---------EXAMPLE----------------------------------------------------------
18111 //
18112 // // pertinent parts of existing instructions in architecture description
18113 // instruct movI(iRegINoSp dst, iRegI src)
18114 // %{
18115 //   match(Set dst (CopyI src));
18116 // %}
18117 //
18118 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18119 // %{
18120 //   match(Set dst (AddI dst src));
18121 //   effect(KILL cr);
18122 // %}
18123 //
18124 // // Change (inc mov) to lea
18125 // peephole %{
//   // increment preceded by register-register move
18127 //   peepmatch ( incI_iReg movI );
18128 //   // require that the destination register of the increment
18129 //   // match the destination register of the move
18130 //   peepconstraint ( 0.dst == 1.dst );
18131 //   // construct a replacement instruction that sets
18132 //   // the destination to ( move's source register + one )
18133 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18134 // %}
18135 //
18136 
18137 // Implementation no longer uses movX instructions since
18138 // machine-independent system no longer uses CopyX nodes.
18139 //
18140 // peephole
18141 // %{
18142 //   peepmatch (incI_iReg movI);
18143 //   peepconstraint (0.dst == 1.dst);
18144 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18145 // %}
18146 
18147 // peephole
18148 // %{
18149 //   peepmatch (decI_iReg movI);
18150 //   peepconstraint (0.dst == 1.dst);
18151 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18152 // %}
18153 
18154 // peephole
18155 // %{
18156 //   peepmatch (addI_iReg_imm movI);
18157 //   peepconstraint (0.dst == 1.dst);
18158 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18159 // %}
18160 
18161 // peephole
18162 // %{
18163 //   peepmatch (incL_iReg movL);
18164 //   peepconstraint (0.dst == 1.dst);
18165 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18166 // %}
18167 
18168 // peephole
18169 // %{
18170 //   peepmatch (decL_iReg movL);
18171 //   peepconstraint (0.dst == 1.dst);
18172 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18173 // %}
18174 
18175 // peephole
18176 // %{
18177 //   peepmatch (addL_iReg_imm movL);
18178 //   peepconstraint (0.dst == 1.dst);
18179 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18180 // %}
18181 
18182 // peephole
18183 // %{
18184 //   peepmatch (addP_iReg_imm movP);
18185 //   peepconstraint (0.dst == 1.dst);
18186 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18187 // %}
18188 
18189 // // Change load of spilled value to only a spill
18190 // instruct storeI(memory mem, iRegI src)
18191 // %{
18192 //   match(Set mem (StoreI mem src));
18193 // %}
18194 //
18195 // instruct loadI(iRegINoSp dst, memory mem)
18196 // %{
18197 //   match(Set dst (LoadI mem));
18198 // %}
18199 //
18200 
18201 //----------SMARTSPILL RULES---------------------------------------------------
18202 // These must follow all instruction definitions as they use the names
18203 // defined in the instructions definitions.
18204 
18205 // Local Variables:
18206 // mode: c++
18207 // End: