1 //
   2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined here: they are kept invisible
// to the register allocator for use as scratch registers (see note above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: save-on-call for Java (first column) but save-on-entry under
// the C convention (second column) -- Java uses no callee saves, see the
// de-optimisation note above.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have reserved roles (noted per line) and are never allocated.
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 as callee save. Float registers
// v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register is described by four 32-bit ADLC slots
  // (Vn, Vn_H, Vn_J, Vn_K) covering the full 128-bit register.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee save under the platform ABI but SOC for Java
  // (see the comment above these definitions).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CSPR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Flags are modelled as a single pseudo-register with no backing VMReg
// (VMRegImpl::Bad()) and encoding 32, one past the general registers.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
alloc_class chunk0(
    // The order below encodes allocation priority (highest first,
    // see the priority comment above this chunk).

    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
alloc_class chunk1(

    // FP/SIMD allocation priority: v16-v31 (no save requirement) first,
    // then the argument registers v0-v7, then v8-v15 last.

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The condition-flags pseudo-register lives in its own chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit general purpose registers
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);


// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// NOTE(review): the mask (_ANY_REG32_mask) is computed in C++ code that
// is not part of this chunk -- verify its definition in the source block.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
 493 
// Class for all 64 bit general purpose registers
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all long integer registers (including SP)
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
// NOTE(review): r27 (heapbase) and r29 (fp) are not listed here even
// though chunk0 marks r27-r31 non-allocatable -- presumably they are
// excluded dynamically elsewhere; verify before relying on this list.
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);

// Class for all non-special integer registers
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
 556 
// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
 636 
// Class for all float registers (single 32-bit slot per register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (two 32-bit slots per register)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 64bit vector registers (same slot layout as double_reg)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers (all four 32-bit slots)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 782 
// NOTE(review): the "128 bit register" classes below list only the first
// two 32-bit slots (Vn, Vn_H) of each register, not the _J/_K halves --
// verify this matches how these operand classes are consumed.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 945 
 946 %}
 947 
 948 //----------DEFINITION BLOCK---------------------------------------------------
 949 // Define name --> value mappings to inform the ADLC of an integer valued name
 950 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 951 // Format:
 952 //        int_def  <name>         ( <int_value>, <expression>);
 953 // Generated Code in ad_<arch>.hpp
 954 //        #define  <name>   (<expression>)
 955 //        // value == <int_value>
 956 // Generated code in ad_<arch>.cpp adlc_verification()
 957 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 958 //
 959 
 960 // we follow the ppc-aix port in using a simple cost model which ranks
 961 // register operations as cheap, memory ops as more expensive and
 962 // branches as most expensive. the first two have a low as well as a
 963 // normal cost. huge cost appears to be a way of saying don't do
 964 // something
 965 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost two plain instructions.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are charged for their associated barriers.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 973 
 974 
 975 //----------SOURCE BLOCK-------------------------------------------------------
 976 // This is a block of C++ code which provides values, functions, and
 977 // definitions necessary in the rest of the architecture description
 978 
 979 source_hpp %{
 980 
 981 #include "asm/macroAssembler.hpp"
 982 #include "gc/shared/cardTable.hpp"
 983 #include "gc/shared/cardTableBarrierSet.hpp"
 984 #include "gc/shared/collectedHeap.hpp"
 985 #include "opto/addnode.hpp"
 986 #include "opto/convertnode.hpp"
 987 
 988 extern RegMask _ANY_REG32_mask;
 989 extern RegMask _ANY_REG_mask;
 990 extern RegMask _PTR_REG_mask;
 991 extern RegMask _NO_SPECIAL_REG32_mask;
 992 extern RegMask _NO_SPECIAL_REG_mask;
 993 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 994 
// Platform hook class consulted by the shared compiler code when
// sizing generated call stubs.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1012 
// Sizing and emission hooks for the exception and deopt handler stubs
// appended to each compiled method.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch to the shared stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1029 
// Platform-dependent node flags: AArch64 defines none beyond the
// shared set, so _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
1036 
1037  bool is_CAS(int opcode, bool maybe_volatile);
1038 
1039   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1040 
1041   bool unnecessary_acquire(const Node *barrier);
1042   bool needs_acquiring_load(const Node *load);
1043 
1044   // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1045 
1046   bool unnecessary_release(const Node *barrier);
1047   bool unnecessary_volatile(const Node *barrier);
1048   bool needs_releasing_store(const Node *store);
1049 
1050   // predicate controlling translation of CompareAndSwapX
1051   bool needs_acquiring_load_exclusive(const Node *load);
1052 
1053   // predicate controlling addressing modes
1054   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1055 %}
1056 
1057 source %{
1058 
1059   // Derived RegMask with conditionally allocatable registers
1060 
  // Platform hook for extra analysis over the mach node graph before
  // code emission; AArch64 performs none.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
1063 
  // Mach nodes need no special code alignment (1 = byte alignment).
  int MachNode::pd_alignment_required() const {
    return 1;
  }
1067 
  // No padding is ever inserted before a mach node on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
1071 
  // Definitions of the RegMask objects declared extern in source_hpp.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;

  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // Exclude r31 (sp) from the 32-bit any-register class.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // Start from all registers, then strip the non-allocatable ones.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && CompressedOops::ptrs_base() != NULL) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
1115 
  // Optimization of volatile gets and puts
1117   // -------------------------------------
1118   //
1119   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1120   // use to implement volatile reads and writes. For a volatile read
1121   // we simply need
1122   //
1123   //   ldar<x>
1124   //
1125   // and for a volatile write we need
1126   //
1127   //   stlr<x>
1128   //
1129   // Alternatively, we can implement them by pairing a normal
1130   // load/store with a memory barrier. For a volatile read we need
1131   //
1132   //   ldr<x>
1133   //   dmb ishld
1134   //
1135   // for a volatile write
1136   //
1137   //   dmb ish
1138   //   str<x>
1139   //   dmb ish
1140   //
1141   // We can also use ldaxr and stlxr to implement compare and swap CAS
1142   // sequences. These are normally translated to an instruction
1143   // sequence like the following
1144   //
1145   //   dmb      ish
1146   // retry:
1147   //   ldxr<x>   rval raddr
1148   //   cmp       rval rold
1149   //   b.ne done
1150   //   stlxr<x>  rval, rnew, rold
1151   //   cbnz      rval retry
1152   // done:
1153   //   cset      r0, eq
1154   //   dmb ishld
1155   //
1156   // Note that the exclusive store is already using an stlxr
1157   // instruction. That is required to ensure visibility to other
1158   // threads of the exclusive write (assuming it succeeds) before that
1159   // of any subsequent writes.
1160   //
1161   // The following instruction sequence is an improvement on the above
1162   //
1163   // retry:
1164   //   ldaxr<x>  rval raddr
1165   //   cmp       rval rold
1166   //   b.ne done
1167   //   stlxr<x>  rval, rnew, rold
1168   //   cbnz      rval retry
1169   // done:
1170   //   cset      r0, eq
1171   //
1172   // We don't need the leading dmb ish since the stlxr guarantees
1173   // visibility of prior writes in the case that the swap is
1174   // successful. Crucially we don't have to worry about the case where
1175   // the swap is not successful since no valid program should be
1176   // relying on visibility of prior changes by the attempting thread
1177   // in the case where the CAS fails.
1178   //
1179   // Similarly, we don't need the trailing dmb ishld if we substitute
1180   // an ldaxr instruction since that will provide all the guarantees we
1181   // require regarding observation of changes made by other threads
1182   // before any change to the CAS address observed by the load.
1183   //
1184   // In order to generate the desired instruction sequence we need to
1185   // be able to identify specific 'signature' ideal graph node
1186   // sequences which i) occur as a translation of a volatile reads or
1187   // writes or CAS operations and ii) do not occur through any other
1188   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1190   // sequences to the desired machine code sequences. Selection of the
1191   // alternative rules can be implemented by predicates which identify
1192   // the relevant node sequences.
1193   //
1194   // The ideal graph generator translates a volatile read to the node
1195   // sequence
1196   //
1197   //   LoadX[mo_acquire]
1198   //   MemBarAcquire
1199   //
1200   // As a special case when using the compressed oops optimization we
1201   // may also see this variant
1202   //
1203   //   LoadN[mo_acquire]
1204   //   DecodeN
1205   //   MemBarAcquire
1206   //
1207   // A volatile write is translated to the node sequence
1208   //
1209   //   MemBarRelease
1210   //   StoreX[mo_release] {CardMark}-optional
1211   //   MemBarVolatile
1212   //
1213   // n.b. the above node patterns are generated with a strict
1214   // 'signature' configuration of input and output dependencies (see
1215   // the predicates below for exact details). The card mark may be as
1216   // simple as a few extra nodes or, in a few GC configurations, may
1217   // include more complex control flow between the leading and
1218   // trailing memory barriers. However, whatever the card mark
1219   // configuration these signatures are unique to translated volatile
1220   // reads/stores -- they will not appear as a result of any other
1221   // bytecode translation or inlining nor as a consequence of
1222   // optimizing transforms.
1223   //
1224   // We also want to catch inlined unsafe volatile gets and puts and
1225   // be able to implement them using either ldar<x>/stlr<x> or some
1226   // combination of ldr<x>/stlr<x> and dmb instructions.
1227   //
1228   // Inlined unsafe volatiles puts manifest as a minor variant of the
1229   // normal volatile put node sequence containing an extra cpuorder
1230   // membar
1231   //
1232   //   MemBarRelease
1233   //   MemBarCPUOrder
1234   //   StoreX[mo_release] {CardMark}-optional
1235   //   MemBarCPUOrder
1236   //   MemBarVolatile
1237   //
1238   // n.b. as an aside, a cpuorder membar is not itself subject to
1239   // matching and translation by adlc rules.  However, the rule
1240   // predicates need to detect its presence in order to correctly
1241   // select the desired adlc rules.
1242   //
1243   // Inlined unsafe volatile gets manifest as a slightly different
1244   // node sequence to a normal volatile get because of the
1245   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1248   // present
1249   //
1250   //   MemBarCPUOrder
1251   //        ||       \\
1252   //   MemBarCPUOrder LoadX[mo_acquire]
1253   //        ||            |
1254   //        ||       {DecodeN} optional
1255   //        ||       /
1256   //     MemBarAcquire
1257   //
1258   // In this case the acquire membar does not directly depend on the
1259   // load. However, we can be sure that the load is generated from an
1260   // inlined unsafe volatile get if we see it dependent on this unique
1261   // sequence of membar nodes. Similarly, given an acquire membar we
1262   // can know that it was added because of an inlined unsafe volatile
1263   // get if it is fed and feeds a cpuorder membar and if its feed
1264   // membar also feeds an acquiring load.
1265   //
1266   // Finally an inlined (Unsafe) CAS operation is translated to the
1267   // following ideal graph
1268   //
1269   //   MemBarRelease
1270   //   MemBarCPUOrder
1271   //   CompareAndSwapX {CardMark}-optional
1272   //   MemBarCPUOrder
1273   //   MemBarAcquire
1274   //
1275   // So, where we can identify these volatile read and write
1276   // signatures we can choose to plant either of the above two code
1277   // sequences. For a volatile read we can simply plant a normal
1278   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1279   // also choose to inhibit translation of the MemBarAcquire and
1280   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1281   //
1282   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1284   // normal str<x> and then a dmb ish for the MemBarVolatile.
1285   // Alternatively, we can inhibit translation of the MemBarRelease
1286   // and MemBarVolatile and instead plant a simple stlr<x>
1287   // instruction.
1288   //
1289   // when we recognise a CAS signature we can choose to plant a dmb
1290   // ish as a translation for the MemBarRelease, the conventional
1291   // macro-instruction sequence for the CompareAndSwap node (which
1292   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1293   // Alternatively, we can elide generation of the dmb instructions
1294   // and plant the alternative CompareAndSwap macro-instruction
1295   // sequence (which uses ldaxr<x>).
1296   //
1297   // Of course, the above only applies when we see these signature
1298   // configurations. We still want to plant dmb instructions in any
1299   // other cases where we may see a MemBarAcquire, MemBarRelease or
1300   // MemBarVolatile. For example, at the end of a constructor which
1301   // writes final/volatile fields we will see a MemBarRelease
1302   // instruction and this needs a 'dmb ish' lest we risk the
1303   // constructed object being visible without making the
1304   // final/volatile field writes visible.
1305   //
1306   // n.b. the translation rules below which rely on detection of the
1307   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1308   // If we see anything other than the signature configurations we
1309   // always just translate the loads and stores to ldr<x> and str<x>
1310   // and translate acquire, release and volatile membars to the
1311   // relevant dmb instructions.
1312   //
1313 
1314   // is_CAS(int opcode, bool maybe_volatile)
1315   //
1316   // return true if opcode is one of the possible CompareAndSwapX
1317   // values otherwise false.
1318 
1319   bool is_CAS(int opcode, bool maybe_volatile)
1320   {
1321     switch(opcode) {
1322       // We handle these
1323     case Op_CompareAndSwapI:
1324     case Op_CompareAndSwapL:
1325     case Op_CompareAndSwapP:
1326     case Op_CompareAndSwapN:
1327     case Op_ShenandoahCompareAndSwapP:
1328     case Op_ShenandoahCompareAndSwapN:
1329     case Op_CompareAndSwapB:
1330     case Op_CompareAndSwapS:
1331     case Op_GetAndSetI:
1332     case Op_GetAndSetL:
1333     case Op_GetAndSetP:
1334     case Op_GetAndSetN:
1335     case Op_GetAndAddI:
1336     case Op_GetAndAddL:
1337       return true;
1338     case Op_CompareAndExchangeI:
1339     case Op_CompareAndExchangeN:
1340     case Op_CompareAndExchangeB:
1341     case Op_CompareAndExchangeS:
1342     case Op_CompareAndExchangeL:
1343     case Op_CompareAndExchangeP:
1344     case Op_WeakCompareAndSwapB:
1345     case Op_WeakCompareAndSwapS:
1346     case Op_WeakCompareAndSwapI:
1347     case Op_WeakCompareAndSwapL:
1348     case Op_WeakCompareAndSwapP:
1349     case Op_WeakCompareAndSwapN:
1350     case Op_ShenandoahWeakCompareAndSwapP:
1351     case Op_ShenandoahWeakCompareAndSwapN:
1352     case Op_ShenandoahCompareAndExchangeP:
1353     case Op_ShenandoahCompareAndExchangeN:
1354       return maybe_volatile;
1355     default:
1356       return false;
1357     }
1358   }
1359 
1360   // helper to determine the maximum number of Phi nodes we may need to
1361   // traverse when searching from a card mark membar for the merge mem
1362   // feeding a trailing membar or vice versa
1363 
1364 // predicates controlling emit of ldr<x>/ldar<x>
1365 
1366 bool unnecessary_acquire(const Node *barrier)
1367 {
1368   assert(barrier->is_MemBar(), "expecting a membar");
1369 
1370   MemBarNode* mb = barrier->as_MemBar();
1371 
1372   if (mb->trailing_load()) {
1373     return true;
1374   }
1375 
1376   if (mb->trailing_load_store()) {
1377     Node* load_store = mb->in(MemBarNode::Precedent);
1378     assert(load_store->is_LoadStore(), "unexpected graph shape");
1379     return is_CAS(load_store->Opcode(), true);
1380   }
1381 
1382   return false;
1383 }
1384 
1385 bool needs_acquiring_load(const Node *n)
1386 {
1387   assert(n->is_Load(), "expecting a load");
1388   LoadNode *ld = n->as_Load();
1389   return ld->is_acquire();
1390 }
1391 
1392 bool unnecessary_release(const Node *n)
1393 {
1394   assert((n->is_MemBar() &&
1395           n->Opcode() == Op_MemBarRelease),
1396          "expecting a release membar");
1397 
1398   MemBarNode *barrier = n->as_MemBar();
1399   if (!barrier->leading()) {
1400     return false;
1401   } else {
1402     Node* trailing = barrier->trailing_membar();
1403     MemBarNode* trailing_mb = trailing->as_MemBar();
1404     assert(trailing_mb->trailing(), "Not a trailing membar?");
1405     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1406 
1407     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1408     if (mem->is_Store()) {
1409       assert(mem->as_Store()->is_release(), "");
1410       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1411       return true;
1412     } else {
1413       assert(mem->is_LoadStore(), "");
1414       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1415       return is_CAS(mem->Opcode(), true);
1416     }
1417   }
1418   return false;
1419 }
1420 
1421 bool unnecessary_volatile(const Node *n)
1422 {
1423   // assert n->is_MemBar();
1424   MemBarNode *mbvol = n->as_MemBar();
1425 
1426   bool release = mbvol->trailing_store();
1427   assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
1428 #ifdef ASSERT
1429   if (release) {
1430     Node* leading = mbvol->leading_membar();
1431     assert(leading->Opcode() == Op_MemBarRelease, "");
1432     assert(leading->as_MemBar()->leading_store(), "");
1433     assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
1434   }
1435 #endif
1436 
1437   return release;
1438 }
1439 
1440 // predicates controlling emit of str<x>/stlr<x>
1441 
1442 bool needs_releasing_store(const Node *n)
1443 {
1444   // assert n->is_Store();
1445   StoreNode *st = n->as_Store();
1446   return st->trailing_membar() != NULL;
1447 }
1448 
1449 // predicate controlling translation of CAS
1450 //
1451 // returns true if CAS needs to use an acquiring load otherwise false
1452 
1453 bool needs_acquiring_load_exclusive(const Node *n)
1454 {
1455   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1456   LoadStoreNode* ldst = n->as_LoadStore();
1457   if (is_CAS(n->Opcode(), false)) {
1458     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1459   } else {
1460     return ldst->trailing_membar() != NULL;
1461   }
1462 
1463   // so we can just return true here
1464   return true;
1465 }
1466 
1467 #define __ _masm.
1468 
1469 // advance declarations for helper functions to convert register
1470 // indices to register objects
1471 
1472 // the ad file has to provide implementations of certain methods
1473 // expected by the generic code
1474 //
1475 // REQUIRED FUNCTIONALITY
1476 
1477 //=============================================================================
1478 
1479 // !!!!! Special hack to get all types of calls to specify the byte offset
1480 //       from the start of the call to the point where the return address
1481 //       will point.
1482 
1483 int MachCallStaticJavaNode::ret_addr_offset()
1484 {
1485   // call should be a simple bl
1486   int off = 4;
1487   return off;
1488 }
1489 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // Four 4-byte instructions precede the return address.
  return 16; // movz, movk, movk, bl
}
1494 
1495 int MachCallRuntimeNode::ret_addr_offset() {
1496   // for generated stubs the call will be
1497   //   far_call(addr)
1498   // for real runtime callouts it will be six instructions
1499   // see aarch64_enc_java_to_runtime
1500   //   adr(rscratch2, retaddr)
1501   //   lea(rscratch1, RuntimeAddress(addr)
1502   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1503   //   blr(rscratch1)
1504   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1505   if (cb) {
1506     return MacroAssembler::far_branch_size();
1507   } else {
1508     return 6 * NativeInstruction::instruction_size;
1509   }
1510 }
1511 
1512 // Indicate if the safepoint node needs the polling page as an input
1513 
1514 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1516 // instruction itself. so we cannot plant a mov of the safepoint poll
1517 // address followed by a load. setting this to true means the mov is
1518 // scheduled as a prior instruction. that's better for scheduling
1519 // anyway.
1520 
1521 bool SafePointNode::needs_polling_address_input()
1522 {
1523   return true;
1524 }
1525 
1526 //=============================================================================
1527 
#ifndef PRODUCT
// Debug-listing text for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
1533 
// Emit a brk #0 (AArch64 breakpoint instruction).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
1538 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // Let the shared code compute the size from the emitted instructions.
  return MachNode::size(ra_);
}
1542 
1543 //=============================================================================
1544 
#ifndef PRODUCT
  // Debug-listing text showing the nop count for this padding node.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
1550 
1551   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1552     C2_MacroAssembler _masm(&cbuf);
1553     for (int i = 0; i < _count; i++) {
1554       __ nop();
1555     }
1556   }
1557 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // Each nop is one fixed-width AArch64 instruction.
    return _count * NativeInstruction::instruction_size;
  }
1561 
1562 //=============================================================================
// The constant table base produces no register; the mask is empty.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
1568 
// The constant base node never needs post-register-allocation
// expansion on AArch64, so postalloc_expand must never be reached.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
1573 
// No code is emitted for the constant base (absolute addressing).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
1577 
// Matches the empty encoding above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
1581 
#ifndef PRODUCT
// Debug-listing text for the (empty) constant base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1587 
#ifndef PRODUCT
// Pretty-print the prolog: optional stack bang note, frame push (two
// forms depending on frame size) and, when an nmethod entry barrier is
// in use, the guard-check sequence.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // Small frames use an immediate sub; larger ones materialize the
  // size in rscratch1 first (mirrors MacroAssembler::build_frame).
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier check (normal compilations only, not stubs).
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
1623 
// Emit the method prolog: patchable nop, optional class-init barrier,
// stack bang, frame build, nmethod entry barrier and constant table
// base offset setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int64_t framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub unless the holder class has been
    // initialized (or is being initialized by this thread).
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // nmethod entry barrier (skipped for stubs).
  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1671 
// Prolog length varies (barriers, stack bang); defer to shared sizing.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1677 
// The prolog contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
1682 
1683 //=============================================================================
1684 
#ifndef PRODUCT
// Pretty-print the epilog: frame pop (three size-dependent forms) and
// the return safepoint-poll touch.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frames: size doesn't fit an immediate; go via rscratch1.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1710 
// Emit the epilog: pop the frame, optionally run the reserved-stack
// check, and touch the polling page on method return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Safepoint poll on return (methods only, not stubs).
  if (do_polling() && C->is_method_compilation()) {
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
1726 
// Epilog length depends on frame size and polling; defer to shared code.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
1731 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
1736 
// Use the default pipeline class for scheduling purposes.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1740 
1741 //=============================================================================
1742 
1743 // Figure out which register class each belongs in: rc_int, rc_float or
1744 // rc_stack.
1745 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1746 
1747 static enum RC rc_class(OptoReg::Name reg) {
1748 
1749   if (reg == OptoReg::Bad) {
1750     return rc_bad;
1751   }
1752 
1753   // we have 30 int registers * 2 halves
1754   // (rscratch1 and rscratch2 are omitted)
1755   int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
1756 
1757   if (reg < slots_of_int_registers) {
1758     return rc_int;
1759   }
1760 
1761   // we have 32 float register * 4 halves
1762   if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
1763     return rc_float;
1764   }
1765 
1766   // Between float regs & stack is the flags regs.
1767   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1768 
1769   return rc_stack;
1770 }
1771 
1772 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1773   Compile* C = ra_->C;
1774 
1775   // Get registers to move.
1776   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1777   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1778   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1779   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1780 
1781   enum RC src_hi_rc = rc_class(src_hi);
1782   enum RC src_lo_rc = rc_class(src_lo);
1783   enum RC dst_hi_rc = rc_class(dst_hi);
1784   enum RC dst_lo_rc = rc_class(dst_lo);
1785 
1786   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1787 
1788   if (src_hi != OptoReg::Bad) {
1789     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1790            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1791            "expected aligned-adjacent pairs");
1792   }
1793 
1794   if (src_lo == dst_lo && src_hi == dst_hi) {
1795     return 0;            // Self copy, no move.
1796   }
1797 
1798   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1799               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1800   int src_offset = ra_->reg2offset(src_lo);
1801   int dst_offset = ra_->reg2offset(dst_lo);
1802 
1803   if (bottom_type()->isa_vect() != NULL) {
1804     uint ireg = ideal_reg();
1805     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1806     if (cbuf) {
1807       C2_MacroAssembler _masm(cbuf);
1808       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1809       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1810         // stack->stack
1811         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1812         if (ireg == Op_VecD) {
1813           __ unspill(rscratch1, true, src_offset);
1814           __ spill(rscratch1, true, dst_offset);
1815         } else {
1816           __ spill_copy128(src_offset, dst_offset);
1817         }
1818       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1819         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1820                ireg == Op_VecD ? __ T8B : __ T16B,
1821                as_FloatRegister(Matcher::_regEncode[src_lo]));
1822       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1823         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1824                        ireg == Op_VecD ? __ D : __ Q,
1825                        ra_->reg2offset(dst_lo));
1826       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1827         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1828                        ireg == Op_VecD ? __ D : __ Q,
1829                        ra_->reg2offset(src_lo));
1830       } else {
1831         ShouldNotReachHere();
1832       }
1833     }
1834   } else if (cbuf) {
1835     C2_MacroAssembler _masm(cbuf);
1836     switch (src_lo_rc) {
1837     case rc_int:
1838       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1839         if (is64) {
1840             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1841                    as_Register(Matcher::_regEncode[src_lo]));
1842         } else {
1843             C2_MacroAssembler _masm(cbuf);
1844             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1845                     as_Register(Matcher::_regEncode[src_lo]));
1846         }
1847       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1848         if (is64) {
1849             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1850                      as_Register(Matcher::_regEncode[src_lo]));
1851         } else {
1852             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1853                      as_Register(Matcher::_regEncode[src_lo]));
1854         }
1855       } else {                    // gpr --> stack spill
1856         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1857         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1858       }
1859       break;
1860     case rc_float:
1861       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1862         if (is64) {
1863             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1864                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1865         } else {
1866             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1867                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1868         }
1869       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1870           if (cbuf) {
1871             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1872                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1873         } else {
1874             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1875                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1876         }
1877       } else {                    // fpr --> stack spill
1878         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1879         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1880                  is64 ? __ D : __ S, dst_offset);
1881       }
1882       break;
1883     case rc_stack:
1884       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1885         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1886       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1887         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1888                    is64 ? __ D : __ S, src_offset);
1889       } else {                    // stack --> stack copy
1890         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1891         __ unspill(rscratch1, is64, src_offset);
1892         __ spill(rscratch1, is64, dst_offset);
1893       }
1894       break;
1895     default:
1896       assert(false, "bad rc_class for spill");
1897       ShouldNotReachHere();
1898     }
1899   }
1900 
1901   if (st) {
1902     st->print("spill ");
1903     if (src_lo_rc == rc_stack) {
1904       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1905     } else {
1906       st->print("%s -> ", Matcher::regName[src_lo]);
1907     }
1908     if (dst_lo_rc == rc_stack) {
1909       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1910     } else {
1911       st->print("%s", Matcher::regName[dst_lo]);
1912     }
1913     if (bottom_type()->isa_vect() != NULL) {
1914       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1915     } else {
1916       st->print("\t# spill size = %d", is64 ? 64:32);
1917     }
1918   }
1919 
1920   return 0;
1921 
1922 }
1923 
#ifndef PRODUCT
// Debug printing: delegate to implementation() with a NULL code buffer so
// that only the textual form is produced.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

// Emit the spill/copy code; printing is suppressed (NULL stream).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
1940 
1941 //=============================================================================
1942 
#ifndef PRODUCT
// Debug printing for BoxLock: shows the stack address being materialized.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

// Materialize the address of the on-stack lock box (sp + offset) into the
// destination register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // The offset is expected to fit an add immediate; anything else would
  // be a compiler bug, not a recoverable condition.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
1969 
1970 //=============================================================================
1971 
#ifndef PRODUCT
// Debug listing for the unverified entry point (inline cache check).
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  // Compare the klass of the receiver (j_rarg0) against the expected
  // klass in rscratch2 (presumably loaded by the inline-cache caller —
  // see the IC miss path below); rscratch1 is a temp.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: branch to the shared IC miss stub (far_jump because
  // the stub may be out of range of a plain branch).
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
2007 
2008 // REQUIRED EMIT CODE
2009 
2010 //=============================================================================
2011 
// Emit exception handler code.  Returns the offset of the handler within
// the stub section, or 0 on failure (code cache full).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // No room in the stub section; record the failure so the compile bails.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}

// Emit deopt handler code.  Returns the offset of the handler within the
// stub section, or 0 on failure (code cache full).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Record the current pc in lr, then jump to the deopt blob's unpack
  // entry; the blob uses lr to identify the deopt site.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2052 
2053 // REQUIRED MATCHER CODE
2054 
2055 //=============================================================================
2056 
2057 const bool Matcher::match_rule_supported(int opcode) {
2058   if (!has_match_rule(opcode))
2059     return false;
2060 
2061   bool ret_value = true;
2062   switch (opcode) {
2063     case Op_CacheWB:
2064     case Op_CacheWBPreSync:
2065     case Op_CacheWBPostSync:
2066       if (!VM_Version::supports_data_cache_line_flush()) {
2067         ret_value = false;
2068       }
2069       break;
2070   }
2071 
2072   return ret_value; // Per default match rules are supported.
2073 }
2074 
2075 // Identify extra cases that we might want to provide match rules for vector nodes and
2076 // other intrinsics guarded with vector length (vlen) and element type (bt).
2077 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
2078   if (!match_rule_supported(opcode)) {
2079     return false;
2080   }
2081 
2082   // Special cases which require vector length
2083   switch (opcode) {
2084     case Op_MulAddVS2VI: {
2085       if (vlen != 4) {
2086         return false;
2087       }
2088       break;
2089     }
2090   }
2091 
2092   return true; // Per default match rules are supported.
2093 }
2094 
// No predicated (masked) vector support on this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the default register-pressure threshold for floats unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on AArch64 — guarded by Unimplemented().
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2108 
2109 // Is this branch offset short enough that a short branch can be used?
2110 //
2111 // NOTE: If the platform does not provide any short branch variants, then
2112 //       this method should return false for offset 0.
2113 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2114   // The passed offset is relative to address of the branch.
2115 
2116   return (-32768 <= offset && offset < 32768);
2117 }
2118 
// Any 64-bit constant is "simple" on AArch64.
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2129 
2130 // Vector width in bytes.
2131 const int Matcher::vector_width_in_bytes(BasicType bt) {
2132   int size = MIN2(16,(int)MaxVectorSize);
2133   // Minimum 2 values in vector
2134   if (size < 2*type2aelembytes(bt)) size = 0;
2135   // But never < 4
2136   if (size < 4) size = 0;
2137   return size;
2138 }
2139 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = vector width divided by element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2144 const int Matcher::min_vector_size(const BasicType bt) {
2145 //  For the moment limit the vector size to 8 bytes
2146     int size = 8 / type2aelembytes(bt);
2147     if (size < 2) size = 2;
2148     return size;
2149 }
2150 
2151 // Vector ideal reg.
2152 const uint Matcher::vector_ideal_reg(int len) {
2153   switch(len) {
2154     case  8: return Op_VecD;
2155     case 16: return Op_VecX;
2156   }
2157   ShouldNotReachHere();
2158   return 0;
2159 }
2160 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
2170 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low bits of the count.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
2194 
// Generic vector operands are unsupported on this port, so none of these
// hooks should ever be reached.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}

bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}

bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2209 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable when no shift is needed (zero-based compressed oops).
  return CompressedOops::shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
2239 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not expected to be called on AArch64 — guarded by Unimplemented().
// (Comment "No-op on amd64" was inherited from the x86 port.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2270 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments on AArch64: integer args in r0-r7, float args in v0-v7
  // (both halves of each register pair).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java argument register may be used for spilling.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never use inline assembly for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2305 
// Register for DIVI projection of divmodI.
// These projections should never be requested — presumably AArch64 has no
// combined divmod match rules — hence ShouldNotReachHere().
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP saved across a method-handle invoke lives in the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2332 
2333 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2334   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2335     Node* u = addp->fast_out(i);
2336     if (u->is_Mem()) {
2337       int opsize = u->as_Mem()->memory_size();
2338       assert(opsize > 0, "unexpected memory operand size");
2339       if (u->as_Mem()->memory_size() != (1<<shift)) {
2340         return false;
2341       }
2342     }
2343   }
2344   return true;
2345 }
2346 
const bool Matcher::convi2l_type_required = false;

// Should the matcher clone input 'm' of node 'n'?
// Cloning the constant shift count lets it be matched directly into the
// vector shift instruction.
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
2357 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // Recognize (LShiftL (ConvI2L idx) con) and bare (ConvI2L idx) offsets
  // and flag them as address subexpressions so the matcher can fold them
  // into a scaled/extended addressing mode.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2398 
// No address reshaping is performed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2401 
2402 
// Emit a volatile load/store via INSN.  Volatile accesses only support
// plain [base] addressing, which the guarantees below enforce.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2411 
2412 
2413 static Address mem2address(int opcode, Register base, int index, int size, int disp)
2414   {
2415     Address::extend scale;
2416 
2417     // Hooboy, this is fugly.  We need a way to communicate to the
2418     // encoder that the index needs to be sign extended, so we have to
2419     // enumerate all the cases.
2420     switch (opcode) {
2421     case INDINDEXSCALEDI2L:
2422     case INDINDEXSCALEDI2LN:
2423     case INDINDEXI2L:
2424     case INDINDEXI2LN:
2425       scale = Address::sxtw(size);
2426       break;
2427     default:
2428       scale = Address::lsl(size);
2429     }
2430 
2431     if (index == -1) {
2432       return Address(base, disp);
2433     } else {
2434       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2435       return Address(base, as_Register(index), scale);
2436     }
2437   }
2438 
2439 
// Member-function-pointer types for the MacroAssembler load/store
// emitters dispatched to by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2445 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      // May rewrite the address using rscratch1 as a temporary base.
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
2467 
  // Float-register variant of loadStore above.  The scale selection
  // mirrors mem2address(): I2L forms need a sign-extended index.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      // May rewrite the address using rscratch1 as a temporary base.
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2498 
  // Vector-register variant of loadStore: no offset legitimization here;
  // the addressing mode is either base+disp or base+scaled-index.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2510 
2511 %}
2512 
2513 
2514 
2515 //----------ENCODING BLOCK-----------------------------------------------------
2516 // This block specifies the encoding classes used by the compiler to
2517 // output byte streams.  Encoding classes are parameterized macros
2518 // used by Machine Instruction Nodes in order to generate the bit
2519 // encoding of the instruction.  Operands specify their base encoding
2520 // interface with the interface keyword.  There are currently
2521 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2522 // COND_INTER.  REG_INTER causes an operand to generate a function
2523 // which returns its register number when queried.  CONST_INTER causes
2524 // an operand to generate a function which returns the value of the
2525 // constant when queried.  MEMORY_INTER causes an operand to generate
2526 // four functions which return the Base Register, the Index Register,
2527 // the Scale Value, and the Offset Value of the operand when queried.
2528 // COND_INTER causes an operand to generate six functions which return
2529 // the encoding code (ie - encoding bits for the instruction)
2530 // associated with each basic boolean condition for a conditional
2531 // instruction.
2532 //
2533 // Instructions specify two basic values for encoding.  Again, a
2534 // function is available to check if the constant displacement is an
2535 // oop. They use the ins_encode keyword to specify their encoding
2536 // classes (which must be a sequence of enc_class names, and their
2537 // parameters, specified in the encoding block), and they use the
2538 // opcode keyword to specify, in order, their primary, secondary, and
2539 // tertiary opcode.  Only the opcode sections which a particular
2540 // instruction needs for encoding need to be specified.
2541 encode %{
2542   // Build emit functions for each basic byte or larger field in the
2543   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2544   // from C++ code in the enc_class source block.  Emit functions will
2545   // live in the main source block for now.  In future, we can
2546   // generalize this by adding a syntax that specifies the sizes of
2547   // fields in an order, so that the adlc can build the emit functions
2548   // automagically
2549 
  // catch all for unimplemented encodings: if ever executed, stops the
  // VM with an "unimplemented" error so missing encodings fail loudly.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2555 
  // BEGIN Non-volatile memory access

  // NOTE(review): everything between the BEGIN/END markers is emitted from
  // ad_encode.m4 -- change the m4 source and regenerate rather than editing
  // here.  Each enc_class delegates addressing-mode selection to
  // loadStore(), passing the memory operand's opcode, base, index, scale
  // and displacement plus the access size in bytes (the trailing literal
  // 1, 2, 4 or 8).  The *0 variants store the zero register; the str
  // variant additionally special-cases a store of sp (see its comment).

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
2794 
  // Vector loads and stores
  //
  // Same loadStore() dispatch as the scalar cases above, but passing a
  // SIMD access-size tag (MacroAssembler::S / D / Q, i.e. 32- / 64- /
  // 128-bit) instead of a byte count.  vecD operands use S or D sized
  // accesses; vecX uses Q.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2831 
  // volatile loads and stores
  //
  // These rely on the MOV_VOLATILE helper macro (defined earlier in this
  // file, outside this chunk), which -- judging from the argument list --
  // forms the effective address from base/index/scale/disp using the given
  // scratch register and emits the named acquire/release instruction
  // (ldar*/stlr*); it presumably also declares _masm, since the classes
  // below use __ without declaring one.  TODO(review): confirm against the
  // macro definition.
  //
  // ldar/stlr only support zero-extending sub-word forms, so the signed
  // sub-word loads follow the load-acquire with an explicit sign-extension.
  // FP variants bounce through rscratch2/rscratch1 with fmov because
  // ldar/stlr only operate on general-purpose registers.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2958 
  // synchronized read/update encodings
  //
  // Exclusive load-acquire / store-release pair.  ldaxr/stlxr take only a
  // base-register address, so any index or displacement in the memory
  // operand is first folded into rscratch1/rscratch2 with lea.  stlxr
  // writes its success/failure status into rscratch1; the trailing cmpw
  // against zr turns that status into condition flags for a following
  // branch or cset (0 = store succeeded).

  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // map stlxr's status result (0 == success) onto the condition flags
    __ cmpw(rscratch1, zr);
  %}
3019 
  // Compare-and-exchange encodings.  All variants delegate to
  // MacroAssembler::cmpxchg with a fixed element size (xword/word/
  // halfword/byte), release semantics, non-weak, and no result register
  // (noreg) -- the outcome is left in the condition flags and picked up
  // by aarch64_enc_cset_eq below.  The memory operand must be a plain
  // base register (no index, no displacement), enforced by the guarantee.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // auxiliary used for CompareAndSwapX to set result register
  // (materialises the EQ flag left behind by a cmpxchg above as 0/1)
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3095 
  // prefetch encodings

  // Emits a PRFM with the PSTL1KEEP hint (prefetch for store, L1, keep).
  // Mirrors the ldaxr address-formation logic: a displacement combined
  // with an index register needs an intermediate lea into rscratch1
  // because prfm cannot encode both at once.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3116 
  // mov encodings
3118 
  // 32-bit immediate move; zero gets the cheaper mov-from-zr form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; zero gets the cheaper mov-from-zr form.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Pointer-constant move.  NULL and the value 1 have dedicated
  // enc_classes (mov_p0/mov_p1 below), so they must not reach here.
  // Oops and metadata go through the reloc-aware movoop/mov_metadata;
  // plain addresses below the VM page size are loaded directly, larger
  // ones via adrp+add so they remain reachable/relocatable.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Null pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (a tagged sentinel value, per the immP_1 operand).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Card-table byte map base (GC barrier support).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow (compressed) oop constant; NULL is handled by mov_n0 below.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow null.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3214 
  // arithmetic encodings

  // Shared add/sub-immediate encoder: $primary (0 = add instruct rule,
  // 1 = subtract) flips the sign, then the instruction is chosen so the
  // assembler always sees a non-negative immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit form of the above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // Integer divide/modulo via corrected_idivl/q.  The boolean argument
  // selects remainder (true, used by mod*) vs quotient (false, used by
  // div*) -- inferred from the pairing below; the "corrected" helpers
  // presumably handle the min-value/-1 and divide-by-zero corner cases
  // required by Java semantics (defined in MacroAssembler -- confirm).
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3276 
3277   // compare instruction encodings
3278 
3279   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3280     C2_MacroAssembler _masm(&cbuf);
3281     Register reg1 = as_Register($src1$$reg);
3282     Register reg2 = as_Register($src2$$reg);
3283     __ cmpw(reg1, reg2);
3284   %}
3285 
3286   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3287     C2_MacroAssembler _masm(&cbuf);
3288     Register reg = as_Register($src1$$reg);
3289     int32_t val = $src2$$constant;
3290     if (val >= 0) {
3291       __ subsw(zr, reg, val);
3292     } else {
3293       __ addsw(zr, reg, -val);
3294     }
3295   %}
3296 
3297   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3298     C2_MacroAssembler _masm(&cbuf);
3299     Register reg1 = as_Register($src1$$reg);
3300     uint32_t val = (uint32_t)$src2$$constant;
3301     __ movw(rscratch1, val);
3302     __ cmpw(reg1, rscratch1);
3303   %}
3304 
3305   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3306     C2_MacroAssembler _masm(&cbuf);
3307     Register reg1 = as_Register($src1$$reg);
3308     Register reg2 = as_Register($src2$$reg);
3309     __ cmp(reg1, reg2);
3310   %}
3311 
3312   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
3313     C2_MacroAssembler _masm(&cbuf);
3314     Register reg = as_Register($src1$$reg);
3315     int64_t val = $src2$$constant;
3316     if (val >= 0) {
3317       __ subs(zr, reg, val);
3318     } else if (val != -val) {
3319       __ adds(zr, reg, -val);
3320     } else {
3321     // aargh, Long.MIN_VALUE is a special case
3322       __ orr(rscratch1, zr, (uint64_t)val);
3323       __ subs(zr, reg, rscratch1);
3324     }
3325   %}
3326 
3327   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3328     C2_MacroAssembler _masm(&cbuf);
3329     Register reg1 = as_Register($src1$$reg);
3330     uint64_t val = (uint64_t)$src2$$constant;
3331     __ mov(rscratch1, val);
3332     __ cmp(reg1, rscratch1);
3333   %}
3334 
3335   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3336     C2_MacroAssembler _masm(&cbuf);
3337     Register reg1 = as_Register($src1$$reg);
3338     Register reg2 = as_Register($src2$$reg);
3339     __ cmp(reg1, reg2);
3340   %}
3341 
3342   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3343     C2_MacroAssembler _masm(&cbuf);
3344     Register reg1 = as_Register($src1$$reg);
3345     Register reg2 = as_Register($src2$$reg);
3346     __ cmpw(reg1, reg2);
3347   %}
3348 
3349   enc_class aarch64_enc_testp(iRegP src) %{
3350     C2_MacroAssembler _masm(&cbuf);
3351     Register reg = as_Register($src$$reg);
3352     __ cmp(reg, zr);
3353   %}
3354 
3355   enc_class aarch64_enc_testn(iRegN src) %{
3356     C2_MacroAssembler _masm(&cbuf);
3357     Register reg = as_Register($src$$reg);
3358     __ cmpw(reg, zr);
3359   %}
3360 
3361   enc_class aarch64_enc_b(label lbl) %{
3362     C2_MacroAssembler _masm(&cbuf);
3363     Label *L = $lbl$$label;
3364     __ b(*L);
3365   %}
3366 
3367   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
3368     C2_MacroAssembler _masm(&cbuf);
3369     Label *L = $lbl$$label;
3370     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3371   %}
3372 
3373   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
3374     C2_MacroAssembler _masm(&cbuf);
3375     Label *L = $lbl$$label;
3376     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3377   %}
3378 
3379   enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
3380   %{
3381      Register sub_reg = as_Register($sub$$reg);
3382      Register super_reg = as_Register($super$$reg);
3383      Register temp_reg = as_Register($temp$$reg);
3384      Register result_reg = as_Register($result$$reg);
3385 
3386      Label miss;
3387      C2_MacroAssembler _masm(&cbuf);
3388      __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
3389                                      NULL, &miss,
3390                                      /*set_cond_codes:*/ true);
3391      if ($primary) {
3392        __ mov(result_reg, zr);
3393      }
3394      __ bind(miss);
3395   %}
3396 
3397   enc_class aarch64_enc_java_static_call(method meth) %{
3398     C2_MacroAssembler _masm(&cbuf);
3399 
3400     address addr = (address)$meth$$method;
3401     address call;
3402     if (!_method) {
3403       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
3404       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
3405     } else {
3406       int method_index = resolved_method_index(cbuf);
3407       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
3408                                                   : static_call_Relocation::spec(method_index);
3409       call = __ trampoline_call(Address(addr, rspec), &cbuf);
3410 
3411       // Emit stub for static call
3412       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
3413       if (stub == NULL) {
3414         ciEnv::current()->record_failure("CodeCache is full");
3415         return;
3416       }
3417     }
3418     if (call == NULL) {
3419       ciEnv::current()->record_failure("CodeCache is full");
3420       return;
3421     }
3422   %}
3423 
3424   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3425     C2_MacroAssembler _masm(&cbuf);
3426     int method_index = resolved_method_index(cbuf);
3427     address call = __ ic_call((address)$meth$$method, method_index);
3428     if (call == NULL) {
3429       ciEnv::current()->record_failure("CodeCache is full");
3430       return;
3431     }
3432   %}
3433 
3434   enc_class aarch64_enc_call_epilog() %{
3435     C2_MacroAssembler _masm(&cbuf);
3436     if (VerifyStackAtCalls) {
3437       // Check that stack depth is unchanged: find majik cookie on stack
3438       __ call_Unimplemented();
3439     }
3440   %}
3441 
3442   enc_class aarch64_enc_java_to_runtime(method meth) %{
3443     C2_MacroAssembler _masm(&cbuf);
3444 
3445     // some calls to generated routines (arraycopy code) are scheduled
3446     // by C2 as runtime calls. if so we can call them using a br (they
3447     // will be in a reachable segment) otherwise we have to use a blr
3448     // which loads the absolute address into a register.
3449     address entry = (address)$meth$$method;
3450     CodeBlob *cb = CodeCache::find_blob(entry);
3451     if (cb) {
3452       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
3453       if (call == NULL) {
3454         ciEnv::current()->record_failure("CodeCache is full");
3455         return;
3456       }
3457     } else {
3458       Label retaddr;
3459       __ adr(rscratch2, retaddr);
3460       __ lea(rscratch1, RuntimeAddress(entry));
3461       // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
3462       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
3463       __ blr(rscratch1);
3464       __ bind(retaddr);
3465       __ add(sp, sp, 2 * wordSize);
3466     }
3467   %}
3468 
3469   enc_class aarch64_enc_rethrow() %{
3470     C2_MacroAssembler _masm(&cbuf);
3471     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
3472   %}
3473 
3474   enc_class aarch64_enc_ret() %{
3475     C2_MacroAssembler _masm(&cbuf);
3476     __ ret(lr);
3477   %}
3478 
3479   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3480     C2_MacroAssembler _masm(&cbuf);
3481     Register target_reg = as_Register($jump_target$$reg);
3482     __ br(target_reg);
3483   %}
3484 
3485   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
3486     C2_MacroAssembler _masm(&cbuf);
3487     Register target_reg = as_Register($jump_target$$reg);
3488     // exception oop should be in r0
3489     // ret addr has been popped into lr
3490     // callee expects it in r3
3491     __ mov(r3, lr);
3492     __ br(target_reg);
3493   %}
3494 
3495   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3496     C2_MacroAssembler _masm(&cbuf);
3497     Register oop = as_Register($object$$reg);
3498     Register box = as_Register($box$$reg);
3499     Register disp_hdr = as_Register($tmp$$reg);
3500     Register tmp = as_Register($tmp2$$reg);
3501     Label cont;
3502     Label object_has_monitor;
3503     Label cas_failed;
3504 
3505     assert_different_registers(oop, box, tmp, disp_hdr);
3506 
3507     // Load markWord from object into displaced_header.
3508     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
3509 
3510     if (UseBiasedLocking && !UseOptoBiasInlining) {
3511       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
3512     }
3513 
3514     // Check for existing monitor
3515     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
3516 
3517     // Set tmp to be (markWord of object | UNLOCK_VALUE).
3518     __ orr(tmp, disp_hdr, markWord::unlocked_value);
3519 
3520     // Initialize the box. (Must happen before we update the object mark!)
3521     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3522 
3523     // Compare object markWord with an unlocked value (tmp) and if
3524     // equal exchange the stack address of our box with object markWord.
3525     // On failure disp_hdr contains the possibly locked markWord.
3526     __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
3527                /*release*/ true, /*weak*/ false, disp_hdr);
3528     __ br(Assembler::EQ, cont);
3529 
3530     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3531 
3532     // If the compare-and-exchange succeeded, then we found an unlocked
3533     // object, will have now locked it will continue at label cont
3534 
3535     __ bind(cas_failed);
3536     // We did not see an unlocked object so try the fast recursive case.
3537 
3538     // Check if the owner is self by comparing the value in the
3539     // markWord of object (disp_hdr) with the stack pointer.
3540     __ mov(rscratch1, sp);
3541     __ sub(disp_hdr, disp_hdr, rscratch1);
3542     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
3543     // If condition is true we are cont and hence we can store 0 as the
3544     // displaced header in the box, which indicates that it is a recursive lock.
3545     __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
3546     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3547 
3548     __ b(cont);
3549 
3550     // Handle existing monitor.
3551     __ bind(object_has_monitor);
3552 
3553     // The object's monitor m is unlocked iff m->owner == NULL,
3554     // otherwise m->owner may contain a thread or a stack address.
3555     //
3556     // Try to CAS m->owner from NULL to current thread.
3557     __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
3558     __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
3559                /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
3560 
3561     // Store a non-null value into the box to avoid looking like a re-entrant
3562     // lock. The fast-path monitor unlock code checks for
3563     // markWord::monitor_value so use markWord::unused_mark which has the
3564     // relevant bit set, and also matches ObjectSynchronizer::enter.
3565     __ mov(tmp, (address)markWord::unused_mark().value());
3566     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3567 
3568     __ bind(cont);
3569     // flag == EQ indicates success
3570     // flag == NE indicates failure
3571   %}
3572 
3573   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3574     C2_MacroAssembler _masm(&cbuf);
3575     Register oop = as_Register($object$$reg);
3576     Register box = as_Register($box$$reg);
3577     Register disp_hdr = as_Register($tmp$$reg);
3578     Register tmp = as_Register($tmp2$$reg);
3579     Label cont;
3580     Label object_has_monitor;
3581 
3582     assert_different_registers(oop, box, tmp, disp_hdr);
3583 
3584     if (UseBiasedLocking && !UseOptoBiasInlining) {
3585       __ biased_locking_exit(oop, tmp, cont);
3586     }
3587 
3588     // Find the lock address and load the displaced header from the stack.
3589     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3590 
3591     // If the displaced header is 0, we have a recursive unlock.
3592     __ cmp(disp_hdr, zr);
3593     __ br(Assembler::EQ, cont);
3594 
3595     // Handle existing monitor.
3596     __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
3597     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
3598 
3599     // Check if it is still a light weight lock, this is is true if we
3600     // see the stack address of the basicLock in the markWord of the
3601     // object.
3602 
3603     __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
3604                /*release*/ true, /*weak*/ false, tmp);
3605     __ b(cont);
3606 
3607     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3608 
3609     // Handle existing monitor.
3610     __ bind(object_has_monitor);
3611     STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
3612     __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
3613     __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
3614     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
3615     __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
3616     __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
3617     __ cmp(rscratch1, zr); // Sets flags for result
3618     __ br(Assembler::NE, cont);
3619 
3620     __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
3621     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
3622     __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
3623     __ cmp(rscratch1, zr); // Sets flags for result
3624     __ cbnz(rscratch1, cont);
3625     // need a release store here
3626     __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
3627     __ stlr(zr, tmp); // set unowned
3628 
3629     __ bind(cont);
3630     // flag == EQ indicates success
3631     // flag == NE indicates failure
3632   %}
3633 
3634 %}
3635 
3636 //----------FRAME--------------------------------------------------------------
3637 // Definition of frame structure and management information.
3638 //
3639 //  S T A C K   L A Y O U T    Allocators stack-slot number
3640 //                             |   (to get allocators register number
3641 //  G  Owned by    |        |  v    add OptoReg::stack0())
3642 //  r   CALLER     |        |
3643 //  o     |        +--------+      pad to even-align allocators stack-slot
3644 //  w     V        |  pad0  |        numbers; owned by CALLER
3645 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3646 //  h     ^        |   in   |  5
3647 //        |        |  args  |  4   Holes in incoming args owned by SELF
3648 //  |     |        |        |  3
3649 //  |     |        +--------+
3650 //  V     |        | old out|      Empty on Intel, window on Sparc
3651 //        |    old |preserve|      Must be even aligned.
3652 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3653 //        |        |   in   |  3   area for Intel ret address
3654 //     Owned by    |preserve|      Empty on Sparc.
3655 //       SELF      +--------+
3656 //        |        |  pad2  |  2   pad to align old SP
3657 //        |        +--------+  1
3658 //        |        | locks  |  0
3659 //        |        +--------+----> OptoReg::stack0(), even aligned
3660 //        |        |  pad1  | 11   pad to align new SP
3661 //        |        +--------+
3662 //        |        |        | 10
3663 //        |        | spills |  9   spills
3664 //        V        |        |  8   (pad0 slot for callee)
3665 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3666 //        ^        |  out   |  7
3667 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3668 //     Owned by    +--------+
3669 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3670 //        |    new |preserve|      Must be even-aligned.
3671 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3672 //        |        |        |
3673 //
3674 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3675 //         known from SELF's arguments and the Java calling convention.
3676 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3684 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3685 //         even aligned with pad0 as needed.
3686 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3687 //           (the latter is true on Intel but is it false on AArch64?)
3688 //         region 6-11 is even aligned; it may be padded out more so that
3689 //         the region from SP to FP meets the minimum stack alignment.
3690 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3691 //         alignment.  Region 11, pad1, may be dynamically extended so that
3692 //         SP meets the minimum alignment.
3693 
3694 frame %{
3695   // What direction does stack grow in (assumed to be same for C & Java)
3696   stack_direction(TOWARDS_LOW);
3697 
3698   // These three registers define part of the calling convention
3699   // between compiled code and the interpreter.
3700 
3701   // Inline Cache Register or methodOop for I2C.
3702   inline_cache_reg(R12);
3703 
3704   // Method Oop Register when calling interpreter.
3705   interpreter_method_oop_reg(R12);
3706 
3707   // Number of stack slots consumed by locking an object
3708   sync_stack_slots(2);
3709 
3710   // Compiled code's Frame Pointer
3711   frame_pointer(R31);
3712 
3713   // Interpreter stores its frame pointer in a register which is
3714   // stored to the stack by I2CAdaptors.
3715   // I2CAdaptors convert from interpreted java to compiled java.
3716   interpreter_frame_pointer(R29);
3717 
3718   // Stack alignment requirement
3719   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
3720 
3721   // Number of stack slots between incoming argument block and the start of
3722   // a new frame.  The PROLOG must add this many slots to the stack.  The
3723   // EPILOG must remove this many slots. aarch64 needs two slots for
3724   // return address and fp.
3725   // TODO think this is correct but check
3726   in_preserve_stack_slots(4);
3727 
3728   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3729   // for calls to C.  Supports the var-args backing area for register parms.
3730   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
3731 
3732   // The after-PROLOG location of the return address.  Location of
3733   // return address specifies a type (REG or STACK) and a number
3734   // representing the register number (i.e. - use a register name) or
3735   // stack slot.
3736   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3737   // Otherwise, it is above the locks and verification slot and alignment word
3738   // TODO this may well be correct but need to check why that - 2 is there
3739   // ppc port uses 0 but we definitely need to allow for fixed_slots
3740   // which folds in the space used for monitors
3741   return_addr(STACK - 2 +
3742               align_up((Compile::current()->in_preserve_stack_slots() +
3743                         Compile::current()->fixed_slots()),
3744                        stack_alignment_in_slots()));
3745 
3746   // Body of function which returns an integer array locating
3747   // arguments either in registers or in stack slots.  Passed an array
3748   // of ideal registers called "sig" and a "length" count.  Stack-slot
3749   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3750   // arguments for a CALLEE.  Incoming stack arguments are
3751   // automatically biased by the preserve_stack_slots field above.
3752 
3753   calling_convention
3754   %{
3755     // No difference between ingoing/outgoing just pass false
3756     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3757   %}
3758 
3759   c_calling_convention
3760   %{
3761     // This is obviously always outgoing
3762     (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
3763   %}
3764 
3765   // Location of compiled Java return values.  Same as C for now.
3766   return_value
3767   %{
3768     // TODO do we allow ideal_reg == Op_RegN???
3769     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
3770            "only return normal values");
3771 
3772     static const int lo[Op_RegL + 1] = { // enum name
3773       0,                                 // Op_Node
3774       0,                                 // Op_Set
3775       R0_num,                            // Op_RegN
3776       R0_num,                            // Op_RegI
3777       R0_num,                            // Op_RegP
3778       V0_num,                            // Op_RegF
3779       V0_num,                            // Op_RegD
3780       R0_num                             // Op_RegL
3781     };
3782 
3783     static const int hi[Op_RegL + 1] = { // enum name
3784       0,                                 // Op_Node
3785       0,                                 // Op_Set
3786       OptoReg::Bad,                      // Op_RegN
3787       OptoReg::Bad,                      // Op_RegI
3788       R0_H_num,                          // Op_RegP
3789       OptoReg::Bad,                      // Op_RegF
3790       V0_H_num,                          // Op_RegD
3791       R0_H_num                           // Op_RegL
3792     };
3793 
3794     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
3795   %}
3796 %}
3797 
3798 //----------ATTRIBUTES---------------------------------------------------------
3799 //----------Operand Attributes-------------------------------------------------
3800 op_attrib op_cost(1);        // Required cost attribute
3801 
3802 //----------Instruction Attributes---------------------------------------------
3803 ins_attrib ins_cost(INSN_COST); // Required cost attribute
3804 ins_attrib ins_size(32);        // Required size attribute (in bits)
3805 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3806                                 // a non-matching short branch variant
3807                                 // of some long branch?
3808 ins_attrib ins_alignment(4);    // Required alignment attribute (must
3809                                 // be a power of 2) specifies the
3810                                 // alignment that some part of the
3811                                 // instruction (not necessarily the
3812                                 // start) requires.  If > 1, a
3813                                 // compute_padding() function must be
3814                                 // provided for the instruction
3815 
3816 //----------OPERANDS-----------------------------------------------------------
3817 // Operand definitions must precede instruction definitions for correct parsing
3818 // in the ADLC because operands constitute user defined types which are used in
3819 // instruction definitions.
3820 
3821 //----------Simple Operands----------------------------------------------------
3822 
3823 // Integer operands 32 bit
3824 // 32 bit immediate
3825 operand immI()
3826 %{
3827   match(ConI);
3828 
3829   op_cost(0);
3830   format %{ %}
3831   interface(CONST_INTER);
3832 %}
3833 
3834 // 32 bit zero
3835 operand immI0()
3836 %{
3837   predicate(n->get_int() == 0);
3838   match(ConI);
3839 
3840   op_cost(0);
3841   format %{ %}
3842   interface(CONST_INTER);
3843 %}
3844 
3845 // 32 bit unit increment
3846 operand immI_1()
3847 %{
3848   predicate(n->get_int() == 1);
3849   match(ConI);
3850 
3851   op_cost(0);
3852   format %{ %}
3853   interface(CONST_INTER);
3854 %}
3855 
3856 // 32 bit unit decrement
3857 operand immI_M1()
3858 %{
3859   predicate(n->get_int() == -1);
3860   match(ConI);
3861 
3862   op_cost(0);
3863   format %{ %}
3864   interface(CONST_INTER);
3865 %}
3866 
3867 // Shift values for add/sub extension shift
3868 operand immIExt()
3869 %{
3870   predicate(0 <= n->get_int() && (n->get_int() <= 4));
3871   match(ConI);
3872 
3873   op_cost(0);
3874   format %{ %}
3875   interface(CONST_INTER);
3876 %}
3877 
3878 operand immI_le_4()
3879 %{
3880   predicate(n->get_int() <= 4);
3881   match(ConI);
3882 
3883   op_cost(0);
3884   format %{ %}
3885   interface(CONST_INTER);
3886 %}
3887 
3888 operand immI_31()
3889 %{
3890   predicate(n->get_int() == 31);
3891   match(ConI);
3892 
3893   op_cost(0);
3894   format %{ %}
3895   interface(CONST_INTER);
3896 %}
3897 
3898 operand immI_8()
3899 %{
3900   predicate(n->get_int() == 8);
3901   match(ConI);
3902 
3903   op_cost(0);
3904   format %{ %}
3905   interface(CONST_INTER);
3906 %}
3907 
3908 operand immI_16()
3909 %{
3910   predicate(n->get_int() == 16);
3911   match(ConI);
3912 
3913   op_cost(0);
3914   format %{ %}
3915   interface(CONST_INTER);
3916 %}
3917 
3918 operand immI_24()
3919 %{
3920   predicate(n->get_int() == 24);
3921   match(ConI);
3922 
3923   op_cost(0);
3924   format %{ %}
3925   interface(CONST_INTER);
3926 %}
3927 
3928 operand immI_32()
3929 %{
3930   predicate(n->get_int() == 32);
3931   match(ConI);
3932 
3933   op_cost(0);
3934   format %{ %}
3935   interface(CONST_INTER);
3936 %}
3937 
3938 operand immI_48()
3939 %{
3940   predicate(n->get_int() == 48);
3941   match(ConI);
3942 
3943   op_cost(0);
3944   format %{ %}
3945   interface(CONST_INTER);
3946 %}
3947 
3948 operand immI_56()
3949 %{
3950   predicate(n->get_int() == 56);
3951   match(ConI);
3952 
3953   op_cost(0);
3954   format %{ %}
3955   interface(CONST_INTER);
3956 %}
3957 
3958 operand immI_63()
3959 %{
3960   predicate(n->get_int() == 63);
3961   match(ConI);
3962 
3963   op_cost(0);
3964   format %{ %}
3965   interface(CONST_INTER);
3966 %}
3967 
3968 operand immI_64()
3969 %{
3970   predicate(n->get_int() == 64);
3971   match(ConI);
3972 
3973   op_cost(0);
3974   format %{ %}
3975   interface(CONST_INTER);
3976 %}
3977 
3978 operand immI_255()
3979 %{
3980   predicate(n->get_int() == 255);
3981   match(ConI);
3982 
3983   op_cost(0);
3984   format %{ %}
3985   interface(CONST_INTER);
3986 %}
3987 
3988 operand immI_65535()
3989 %{
3990   predicate(n->get_int() == 65535);
3991   match(ConI);
3992 
3993   op_cost(0);
3994   format %{ %}
3995   interface(CONST_INTER);
3996 %}
3997 
3998 operand immL_255()
3999 %{
4000   predicate(n->get_long() == 255L);
4001   match(ConL);
4002 
4003   op_cost(0);
4004   format %{ %}
4005   interface(CONST_INTER);
4006 %}
4007 
4008 operand immL_65535()
4009 %{
4010   predicate(n->get_long() == 65535L);
4011   match(ConL);
4012 
4013   op_cost(0);
4014   format %{ %}
4015   interface(CONST_INTER);
4016 %}
4017 
4018 operand immL_4294967295()
4019 %{
4020   predicate(n->get_long() == 4294967295L);
4021   match(ConL);
4022 
4023   op_cost(0);
4024   format %{ %}
4025   interface(CONST_INTER);
4026 %}
4027 
4028 operand immL_bitmask()
4029 %{
4030   predicate((n->get_long() != 0)
4031             && ((n->get_long() & 0xc000000000000000l) == 0)
4032             && is_power_of_2(n->get_long() + 1));
4033   match(ConL);
4034 
4035   op_cost(0);
4036   format %{ %}
4037   interface(CONST_INTER);
4038 %}
4039 
4040 operand immI_bitmask()
4041 %{
4042   predicate((n->get_int() != 0)
4043             && ((n->get_int() & 0xc0000000) == 0)
4044             && is_power_of_2(n->get_int() + 1));
4045   match(ConI);
4046 
4047   op_cost(0);
4048   format %{ %}
4049   interface(CONST_INTER);
4050 %}
4051 
4052 // Scale values for scaled offset addressing modes (up to long but not quad)
4053 operand immIScale()
4054 %{
4055   predicate(0 <= n->get_int() && (n->get_int() <= 3));
4056   match(ConI);
4057 
4058   op_cost(0);
4059   format %{ %}
4060   interface(CONST_INTER);
4061 %}
4062 
4063 // 26 bit signed offset -- for pc-relative branches
4064 operand immI26()
4065 %{
4066   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
4067   match(ConI);
4068 
4069   op_cost(0);
4070   format %{ %}
4071   interface(CONST_INTER);
4072 %}
4073 
4074 // 19 bit signed offset -- for pc-relative loads
4075 operand immI19()
4076 %{
4077   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
4078   match(ConI);
4079 
4080   op_cost(0);
4081   format %{ %}
4082   interface(CONST_INTER);
4083 %}
4084 
4085 // 12 bit unsigned offset -- for base plus immediate loads
4086 operand immIU12()
4087 %{
4088   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
4089   match(ConI);
4090 
4091   op_cost(0);
4092   format %{ %}
4093   interface(CONST_INTER);
4094 %}
4095 
4096 operand immLU12()
4097 %{
4098   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
4099   match(ConL);
4100 
4101   op_cost(0);
4102   format %{ %}
4103   interface(CONST_INTER);
4104 %}
4105 
4106 // Offset for scaled or unscaled immediate loads and stores
4107 operand immIOffset()
4108 %{
4109   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
4110   match(ConI);
4111 
4112   op_cost(0);
4113   format %{ %}
4114   interface(CONST_INTER);
4115 %}
4116 
4117 operand immIOffset1()
4118 %{
4119   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
4120   match(ConI);
4121 
4122   op_cost(0);
4123   format %{ %}
4124   interface(CONST_INTER);
4125 %}
4126 
4127 operand immIOffset2()
4128 %{
4129   predicate(Address::offset_ok_for_immed(n->get_int(), 1));
4130   match(ConI);
4131 
4132   op_cost(0);
4133   format %{ %}
4134   interface(CONST_INTER);
4135 %}
4136 
4137 operand immIOffset4()
4138 %{
4139   predicate(Address::offset_ok_for_immed(n->get_int(), 2));
4140   match(ConI);
4141 
4142   op_cost(0);
4143   format %{ %}
4144   interface(CONST_INTER);
4145 %}
4146 
4147 operand immIOffset8()
4148 %{
4149   predicate(Address::offset_ok_for_immed(n->get_int(), 3));
4150   match(ConI);
4151 
4152   op_cost(0);
4153   format %{ %}
4154   interface(CONST_INTER);
4155 %}
4156 
4157 operand immIOffset16()
4158 %{
4159   predicate(Address::offset_ok_for_immed(n->get_int(), 4));
4160   match(ConI);
4161 
4162   op_cost(0);
4163   format %{ %}
4164   interface(CONST_INTER);
4165 %}
4166 
// Long variants of the immIOffset* operands above: same
// Address::offset_ok_for_immed test (second argument is the log2 of the
// access size), applied to a ConL instead of a ConI.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (shift 0; same test as immLoffset).
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access (shift 1).
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (shift 2).
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (shift 3).
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (shift 4).
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4226 
// 32 bit integer valid for add sub immediate
// (widened to int64_t so the assembler's 64-bit validity check applies).
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// (AArch64 bitmask-immediate form, checked in 32-bit mode).
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4248 
// Integer operands 64 bit
// 64 bit immediate (any value; no predicate).
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (matches only the constant frame_anchor_offset + last_Java_pc_offset,
// used by thread_anchor_pc below).
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
// (AArch64 bitmask-immediate form, checked in 64-bit mode).
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4335 
// Pointer operands
// Pointer Immediate (any value; no predicate)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches a ConP equal to the card table's byte_map_base, but only when
// the active barrier set is a CardTableBarrierSet (first clause guards
// the cast in the second).
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// NOTE(review): the original comment here duplicated immP_M1's ("write
// the current PC to the thread anchor") — likely a copy/paste; confirm
// the actual use of the -2 sentinel against its matching rules.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4406 
// Float and Double operands
// Double Immediate (any value; no predicate)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern compare via jlong_cast, so -0.0d does NOT match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate that the assembler can encode directly in an
// FP-immediate (fmov) form.
// (The original comment "constant 'double +0.0'" did not match the
// predicate, which accepts any fmov-encodable value.)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value; no predicate)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern compare via jint_cast, so -0.0f does NOT match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate that the assembler can encode directly in an
// FP-immediate (fmov) form (validity checked on the widened double).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4467 
// Narrow pointer operands
// Narrow Pointer Immediate (compressed oop constant; any value)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate (compressed class pointer constant)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4498 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its sibling operands this one has no op_cost();
// presumably adlc's default cost applies — confirm before relying on it.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
4542 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register pointer operands: each of the following allocates only
// in a single named register class, for rules that need a specific
// physical register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4659 
// Fixed-register long operands (see the fixed-register pointer operands
// above for the pattern).

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4714 
// Fixed-register 32-bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4759 
4760 
// Narrow (compressed) Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (original comment said "Integer 64 bit Register not Special", which
// did not match this operand: it matches RegN in a 32-bit class)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4820 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4864 
// Fixed-register double operands: vRegD_Vn allocates only in the single
// register class vn_reg, for rules that require a specific FP/SIMD
// register. One operand per register v0..v31; bodies are identical apart
// from the class name.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5152 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same register class as rFlagsReg; the distinct operand name lets
// rules distinguish signed from unsigned condition-code consumers)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5192 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread register class (original trailing comment said "link_reg" — copy/paste from lr_RegP)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5234 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER descriptions below, index(0xffffffff) is the
// matcher's sentinel for "no index register".

// [base] — plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (sxtw(int index) << scale)] — scaled, sign-extended 32-bit index.
// The predicate requires that the scaled access size works for every
// memory user of this AddP.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)] — scaled 64-bit index, same predicate.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + sxtw(int index)] — unscaled, sign-extended 32-bit index
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + long index] — unscaled 64-bit index
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5308 
// [base + immediate-int offset] operands; indOffIN restricts the offset
// to the immIOffsetN operand valid for an N-byte access.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5392 
// [base + immediate-long offset] operands; indOffLN restricts the offset
// to the immLoffsetN operand valid for an N-byte access.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5476 
// Narrow-oop addressing operands: these match a DecodeN of a compressed
// oop as the base. All are guarded by CompressedOops::shift() == 0 —
// presumably the only mode in which the narrow register can serve
// directly as an address base (TODO confirm heap-base handling).

operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + (sxtw(int index) << scale)]
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [narrow base + (long index << scale)]
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [narrow base + sxtw(int index)]
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + long index]
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + immediate-int offset]
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base + immediate-long offset]
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5581 
5582 
5583 
5584 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// Memory operand addressing the pc slot in the thread's frame anchor:
// thread register base plus the fixed pc-slot offset (immL_pc_off).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
5598 
5599 //----------Special Memory Operands--------------------------------------------
5600 // Stack Slot Operand - This operand is used for loading and storing temporary
5601 //                      values on the stack where a match requires a value to
5602 //                      flow through memory.
// Stack-slot operands: SP-relative accesses used when a value must
// flow through memory during matching. All use base 0x1e (SP) with
// the allocated stack slot supplying the displacement.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // NOTE(review): only the pointer variant carries an explicit
  // op_cost(100); the I/F/D/L variants below rely on the default —
  // confirm this asymmetry is intended.
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int value.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float value.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double value.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long value.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5673 
5674 // Operands for expressing Control Flow
5675 // NOTE: Label is a predefined operand which should not be redefined in
5676 //       the AD file. It is generically handled within the ADLC.
5677 
5678 //----------Conditional Branch Operands----------------------------------------
5679 // Comparison Op  - This is the operation of the comparison, and is limited to
5680 //                  the following set of codes:
5681 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5682 //
5683 // Other attributes of the comparison, such as unsignedness, are specified
5684 // by the comparison instruction that sets a condition code flags register.
5685 // That result is represented by a flags operand whose subtype is appropriate
5686 // to the unsignedness (etc.) of the comparison.
5687 //
5688 // Later, the instruction which matches both the Comparison Op (a Bool) and
5689 // the flags (produced by the Cmp) specifies the coding of the comparison op
5690 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5691 
5692 // used for signed integral comparisons and fp comparisons
5693 
// Signed/FP comparison operand. The hex values are the AArch64
// condition-code encodings corresponding to each mnemonic.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5710 
5711 // used for unsigned integral comparisons
5712 
// Unsigned comparison operand: same as cmpOp but the ordering
// conditions use the unsigned AArch64 codes (lo/hs/ls/hi).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5729 
5730 // used for certain integral comparisons which can be
5731 // converted to cbxx or tbxx instructions
5732 
// Restricted comparison operand: only eq/ne tests match, allowing the
// instruction rules that use it to emit cbz/cbnz or tbz/tbnz forms.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted comparison operand: only lt/ge tests match (sign-bit
// tests), again enabling cbxx/tbxx selection.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted unsigned comparison operand: eq/ne/lt/ge tests only.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5802 
5803 // Special operand allowing long args to int ops to be truncated for free
5804 
// Register operand matching (ConvL2I reg): lets 32-bit rules consume a
// long register directly, eliding the explicit truncation, since the
// 32-bit instruction only reads the low 32 bits anyway.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // Register (not memory) interface: the operand is just the register.
  interface(REG_INTER)
%}
5815 
// Memory opclasses for vector loads/stores of 4, 8 and 16 bytes;
// each admits the immediate-offset operands sized for that access.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5819 
5820 //----------OPERAND CLASSES----------------------------------------------------
5821 // Operand Classes are groups of operands that are used as to simplify
5822 // instruction definitions by not requiring the AD writer to specify
5823 // separate instructions for every form of operand when the
5824 // instruction accepts multiple operand types with the same basic
5825 // encoding and format. The classic case of this is memory operands.
5826 
5827 // memory is used to define read/write location for load/store
5828 // instruction defs. we can turn a memory op into an Address
5829 
// memory1/2/4/8 differ only in which immediate-offset operands they
// admit (scaled for 1-, 2-, 4- and 8-byte accesses). Note the narrow
// offset forms (indOffIN/indOffLN) only appear in memory4/memory8.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
5846 
5847 
5848 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5849 // operations. it allows the src to be either an iRegI or a (ConvL2I
5850 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5851 // can be elided because the 32-bit instruction will just employ the
5852 // lower 32 bits anyway.
5853 //
5854 // n.b. this does not elide all L2I conversions. if the truncated
5855 // value is consumed by more than one operation then the ConvL2I
5856 // cannot be bundled into the consuming nodes so an l2i gets planted
5857 // (actually a movw $dst $src) and the downstream instructions consume
5858 // the result of the l2i as an iRegI input. That's a shame since the
5859 // movw is actually redundant but its not too costly.
5860 
5861 opclass iRegIorL2I(iRegI, iRegL2I);
5862 
5863 //----------PIPELINE-----------------------------------------------------------
5864 // Rules which define the behavior of the target architectures pipeline.
5865 
5866 // For specific pipelines, eg A53, define the stages of that pipeline
5867 //pipe_desc(ISS, EX1, EX2, WR);
// Map the generic stages S0-S3 onto A53-style names:
// ISS = issue, EX1/EX2 = execute, WR = write-back.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5872 
5873 // Integer ALU reg operation
5874 pipeline %{
5875 
attributes %{
  // AArch64 instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // AArch64 instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5888 
5889 // We don't use an actual pipeline model so don't care about resources
5890 // or description. we do use pipeline classes to introduce fixed
5891 // latencies
5892 
5893 //----------RESOURCES----------------------------------------------------------
5894 // Resources are the functional units available to the machine
5895 
// Functional units: INS0/INS1 are the two issue slots (INS01 = either),
// ALU0/ALU1 the integer ALUs, plus multiply-accumulate (MAC), divide
// (DIV), branch, load/store (LDST) and the NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5903 
5904 //----------PIPELINE DESCRIPTION-----------------------------------------------
5905 // Pipeline Description specifies the stages in the machine's pipeline
5906 
// Define the pipeline as a generic 6 stage pipeline
// (S0-S3 are aliased to ISS/EX1/EX2/WR by the #defines above).
pipe_desc(S0, S1, S2, S3, S4, S5);
5909 
5910 //----------PIPELINE CLASSES---------------------------------------------------
5911 // Pipeline Classes describe the stages in which input and output are
5912 // referenced by the hardware pipeline.
5913 
// FP dyadic op, single precision: sources read in S1/S2, result
// available in S5, can dual-issue in either slot.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5951 
// FP/integer conversion classes: all read the source in S1 and produce
// the result in S5 on the NEON/FP unit, dual-issuing in either slot.

// double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
// NOTE(review): src is iRegIorL2I here while fp_l2f above uses iRegL —
// confirm the asymmetry is intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6041 
// FP divide, single precision: restricted to issue slot 0 (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision: restricted to issue slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision: no source operands.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load (from constant pool), single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6115 
// Vector multiply, 64-bit operands: either issue slot.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit operands: slot 0 only.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is both read (accumulator,
// in S1) and written (in S5).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit: slot 0 only.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6157 
// Vector integer dyadic op, 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit: slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit: lower latency than vdop (result in S3).
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit: slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit data.
// n.b. the shift amount lives in a vector register (vecX) in both forms.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit data: slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit data.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit data: slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6235 
// Vector FP dyadic op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit: slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit: slot 0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit: slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit: slot 0 only.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit: slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6302 
// Duplicate a GP register into all lanes of a 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a GP register into all lanes of a 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into a 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into a 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into a 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit destination.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit destination: slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
6363 
// Vector load, 64-bit destination: address consumed at issue.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit destination.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit source: data read in S2, address at issue.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6390 
// Vector store, 128-bit source: data read in S2, address at issue.
// Fix: the source operand was declared vecD (64-bit) although this is
// the 128-bit store class paired with vmem16 and vload_reg_mem128(vecX);
// declare it vecX for consistency. Instructions reference the class by
// name via ins_pipe, so this is caller-compatible.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6399 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand needed earlier
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): the ALU is booked in EX1 while dst is written in EX2
  // (cf. the comment above) — confirm this asymmetry is intended.
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6497 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6524 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6562 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6615 
//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6641 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6675 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);  // data available later than the address
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);  // address register consumed at issue
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6709 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6738 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6762 
6763 // Empty pipeline class
6764 pipe_class pipe_class_empty()
6765 %{
6766   single_instruction;
6767   fixed_latency(0);
6768 %}
6769 
6770 // Default pipeline class.
6771 pipe_class pipe_class_default()
6772 %{
6773   single_instruction;
6774   fixed_latency(2);
6775 %}
6776 
6777 // Pipeline class for compares.
6778 pipe_class pipe_class_compare()
6779 %{
6780   single_instruction;
6781   fixed_latency(16);
6782 %}
6783 
6784 // Pipeline class for memory operations.
6785 pipe_class pipe_class_memory()
6786 %{
6787   single_instruction;
6788   fixed_latency(16);
6789 %}
6790 
6791 // Pipeline class for call.
6792 pipe_class pipe_class_call()
6793 %{
6794   single_instruction;
6795   fixed_latency(100);
6796 %}
6797 
6798 // Define the class for the Nop node.
6799 define %{
6800    MachNop = pipe_class_empty;
6801 %}
6802 
6803 %}
6804 //----------INSTRUCTIONS-------------------------------------------------------
6805 //
6806 // match      -- States which machine-independent subtree may be replaced
6807 //               by this instruction.
6808 // ins_cost   -- The estimated cost of this instruction is used by instruction
6809 //               selection to identify a minimum cost tree of machine
6810 //               instructions that matches a tree of machine-independent
6811 //               instructions.
6812 // format     -- A string providing the disassembly for this instruction.
6813 //               The value of an instruction's operand may be inserted
6814 //               by referring to it with a '$' prefix.
6815 // opcode     -- Three instruction opcodes may be provided.  These are referred
6816 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6818 //               indicate the type of machine instruction, while secondary
6819 //               and tertiary are often used for prefix options or addressing
6820 //               modes.
6821 // ins_encode -- A list of encode classes with parameters. The encode class
6822 //               name must have been defined in an 'enc_class' specification
6823 //               in the encode section of the architecture description.
6824 
6825 // ============================================================================
6826 // Memory (Load/Store) Instructions
6827 
6828 // Load Instructions
6829 
6830 // Load Byte (8 bit signed)
6831 instruct loadB(iRegINoSp dst, memory1 mem)
6832 %{
6833   match(Set dst (LoadB mem));
6834   predicate(!needs_acquiring_load(n));
6835 
6836   ins_cost(4 * INSN_COST);
6837   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6838 
6839   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6840 
6841   ins_pipe(iload_reg_mem);
6842 %}
6843 
6844 // Load Byte (8 bit signed) into long
6845 instruct loadB2L(iRegLNoSp dst, memory1 mem)
6846 %{
6847   match(Set dst (ConvI2L (LoadB mem)));
6848   predicate(!needs_acquiring_load(n->in(1)));
6849 
6850   ins_cost(4 * INSN_COST);
6851   format %{ "ldrsb  $dst, $mem\t# byte" %}
6852 
6853   ins_encode(aarch64_enc_ldrsb(dst, mem));
6854 
6855   ins_pipe(iload_reg_mem);
6856 %}
6857 
6858 // Load Byte (8 bit unsigned)
6859 instruct loadUB(iRegINoSp dst, memory1 mem)
6860 %{
6861   match(Set dst (LoadUB mem));
6862   predicate(!needs_acquiring_load(n));
6863 
6864   ins_cost(4 * INSN_COST);
6865   format %{ "ldrbw  $dst, $mem\t# byte" %}
6866 
6867   ins_encode(aarch64_enc_ldrb(dst, mem));
6868 
6869   ins_pipe(iload_reg_mem);
6870 %}
6871 
6872 // Load Byte (8 bit unsigned) into long
6873 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
6874 %{
6875   match(Set dst (ConvI2L (LoadUB mem)));
6876   predicate(!needs_acquiring_load(n->in(1)));
6877 
6878   ins_cost(4 * INSN_COST);
6879   format %{ "ldrb  $dst, $mem\t# byte" %}
6880 
6881   ins_encode(aarch64_enc_ldrb(dst, mem));
6882 
6883   ins_pipe(iload_reg_mem);
6884 %}
6885 
6886 // Load Short (16 bit signed)
6887 instruct loadS(iRegINoSp dst, memory2 mem)
6888 %{
6889   match(Set dst (LoadS mem));
6890   predicate(!needs_acquiring_load(n));
6891 
6892   ins_cost(4 * INSN_COST);
6893   format %{ "ldrshw  $dst, $mem\t# short" %}
6894 
6895   ins_encode(aarch64_enc_ldrshw(dst, mem));
6896 
6897   ins_pipe(iload_reg_mem);
6898 %}
6899 
6900 // Load Short (16 bit signed) into long
6901 instruct loadS2L(iRegLNoSp dst, memory2 mem)
6902 %{
6903   match(Set dst (ConvI2L (LoadS mem)));
6904   predicate(!needs_acquiring_load(n->in(1)));
6905 
6906   ins_cost(4 * INSN_COST);
6907   format %{ "ldrsh  $dst, $mem\t# short" %}
6908 
6909   ins_encode(aarch64_enc_ldrsh(dst, mem));
6910 
6911   ins_pipe(iload_reg_mem);
6912 %}
6913 
6914 // Load Char (16 bit unsigned)
6915 instruct loadUS(iRegINoSp dst, memory2 mem)
6916 %{
6917   match(Set dst (LoadUS mem));
6918   predicate(!needs_acquiring_load(n));
6919 
6920   ins_cost(4 * INSN_COST);
6921   format %{ "ldrh  $dst, $mem\t# short" %}
6922 
6923   ins_encode(aarch64_enc_ldrh(dst, mem));
6924 
6925   ins_pipe(iload_reg_mem);
6926 %}
6927 
6928 // Load Short/Char (16 bit unsigned) into long
6929 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
6930 %{
6931   match(Set dst (ConvI2L (LoadUS mem)));
6932   predicate(!needs_acquiring_load(n->in(1)));
6933 
6934   ins_cost(4 * INSN_COST);
6935   format %{ "ldrh  $dst, $mem\t# short" %}
6936 
6937   ins_encode(aarch64_enc_ldrh(dst, mem));
6938 
6939   ins_pipe(iload_reg_mem);
6940 %}
6941 
6942 // Load Integer (32 bit signed)
6943 instruct loadI(iRegINoSp dst, memory4 mem)
6944 %{
6945   match(Set dst (LoadI mem));
6946   predicate(!needs_acquiring_load(n));
6947 
6948   ins_cost(4 * INSN_COST);
6949   format %{ "ldrw  $dst, $mem\t# int" %}
6950 
6951   ins_encode(aarch64_enc_ldrw(dst, mem));
6952 
6953   ins_pipe(iload_reg_mem);
6954 %}
6955 
6956 // Load Integer (32 bit signed) into long
6957 instruct loadI2L(iRegLNoSp dst, memory4 mem)
6958 %{
6959   match(Set dst (ConvI2L (LoadI mem)));
6960   predicate(!needs_acquiring_load(n->in(1)));
6961 
6962   ins_cost(4 * INSN_COST);
6963   format %{ "ldrsw  $dst, $mem\t# int" %}
6964 
6965   ins_encode(aarch64_enc_ldrsw(dst, mem));
6966 
6967   ins_pipe(iload_reg_mem);
6968 %}
6969 
6970 // Load Integer (32 bit unsigned) into long
6971 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
6972 %{
6973   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6974   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6975 
6976   ins_cost(4 * INSN_COST);
6977   format %{ "ldrw  $dst, $mem\t# int" %}
6978 
6979   ins_encode(aarch64_enc_ldrw(dst, mem));
6980 
6981   ins_pipe(iload_reg_mem);
6982 %}
6983 
6984 // Load Long (64 bit signed)
6985 instruct loadL(iRegLNoSp dst, memory8 mem)
6986 %{
6987   match(Set dst (LoadL mem));
6988   predicate(!needs_acquiring_load(n));
6989 
6990   ins_cost(4 * INSN_COST);
6991   format %{ "ldr  $dst, $mem\t# int" %}
6992 
6993   ins_encode(aarch64_enc_ldr(dst, mem));
6994 
6995   ins_pipe(iload_reg_mem);
6996 %}
6997 
6998 // Load Range
6999 instruct loadRange(iRegINoSp dst, memory4 mem)
7000 %{
7001   match(Set dst (LoadRange mem));
7002 
7003   ins_cost(4 * INSN_COST);
7004   format %{ "ldrw  $dst, $mem\t# range" %}
7005 
7006   ins_encode(aarch64_enc_ldrw(dst, mem));
7007 
7008   ins_pipe(iload_reg_mem);
7009 %}
7010 
7011 // Load Pointer
7012 instruct loadP(iRegPNoSp dst, memory8 mem)
7013 %{
7014   match(Set dst (LoadP mem));
7015   predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
7016 
7017   ins_cost(4 * INSN_COST);
7018   format %{ "ldr  $dst, $mem\t# ptr" %}
7019 
7020   ins_encode(aarch64_enc_ldr(dst, mem));
7021 
7022   ins_pipe(iload_reg_mem);
7023 %}
7024 
7025 // Load Compressed Pointer
7026 instruct loadN(iRegNNoSp dst, memory4 mem)
7027 %{
7028   match(Set dst (LoadN mem));
7029   predicate(!needs_acquiring_load(n));
7030 
7031   ins_cost(4 * INSN_COST);
7032   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7033 
7034   ins_encode(aarch64_enc_ldrw(dst, mem));
7035 
7036   ins_pipe(iload_reg_mem);
7037 %}
7038 
7039 // Load Klass Pointer
7040 instruct loadKlass(iRegPNoSp dst, memory8 mem)
7041 %{
7042   match(Set dst (LoadKlass mem));
7043   predicate(!needs_acquiring_load(n));
7044 
7045   ins_cost(4 * INSN_COST);
7046   format %{ "ldr  $dst, $mem\t# class" %}
7047 
7048   ins_encode(aarch64_enc_ldr(dst, mem));
7049 
7050   ins_pipe(iload_reg_mem);
7051 %}
7052 
7053 // Load Narrow Klass Pointer
7054 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
7055 %{
7056   match(Set dst (LoadNKlass mem));
7057   predicate(!needs_acquiring_load(n));
7058 
7059   ins_cost(4 * INSN_COST);
7060   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7061 
7062   ins_encode(aarch64_enc_ldrw(dst, mem));
7063 
7064   ins_pipe(iload_reg_mem);
7065 %}
7066 
7067 // Load Float
7068 instruct loadF(vRegF dst, memory4 mem)
7069 %{
7070   match(Set dst (LoadF mem));
7071   predicate(!needs_acquiring_load(n));
7072 
7073   ins_cost(4 * INSN_COST);
7074   format %{ "ldrs  $dst, $mem\t# float" %}
7075 
7076   ins_encode( aarch64_enc_ldrs(dst, mem) );
7077 
7078   ins_pipe(pipe_class_memory);
7079 %}
7080 
7081 // Load Double
7082 instruct loadD(vRegD dst, memory8 mem)
7083 %{
7084   match(Set dst (LoadD mem));
7085   predicate(!needs_acquiring_load(n));
7086 
7087   ins_cost(4 * INSN_COST);
7088   format %{ "ldrd  $dst, $mem\t# double" %}
7089 
7090   ins_encode( aarch64_enc_ldrd(dst, mem) );
7091 
7092   ins_pipe(pipe_class_memory);
7093 %}
7094 
7095 
7096 // Load Int Constant
7097 instruct loadConI(iRegINoSp dst, immI src)
7098 %{
7099   match(Set dst src);
7100 
7101   ins_cost(INSN_COST);
7102   format %{ "mov $dst, $src\t# int" %}
7103 
7104   ins_encode( aarch64_enc_movw_imm(dst, src) );
7105 
7106   ins_pipe(ialu_imm);
7107 %}
7108 
7109 // Load Long Constant
7110 instruct loadConL(iRegLNoSp dst, immL src)
7111 %{
7112   match(Set dst src);
7113 
7114   ins_cost(INSN_COST);
7115   format %{ "mov $dst, $src\t# int64_t" %}
7116 
7117   ins_encode( aarch64_enc_mov_imm(dst, src) );
7118 
7119   ins_pipe(ialu_imm);
7120 %}
7121 
7122 // Load Pointer Constant
7123 
7124 instruct loadConP(iRegPNoSp dst, immP con)
7125 %{
7126   match(Set dst con);
7127 
7128   ins_cost(INSN_COST * 4);
7129   format %{
7130     "mov  $dst, $con\t# ptr\n\t"
7131   %}
7132 
7133   ins_encode(aarch64_enc_mov_p(dst, con));
7134 
7135   ins_pipe(ialu_imm);
7136 %}
7137 
7138 // Load Null Pointer Constant
7139 
7140 instruct loadConP0(iRegPNoSp dst, immP0 con)
7141 %{
7142   match(Set dst con);
7143 
7144   ins_cost(INSN_COST);
7145   format %{ "mov  $dst, $con\t# NULL ptr" %}
7146 
7147   ins_encode(aarch64_enc_mov_p0(dst, con));
7148 
7149   ins_pipe(ialu_imm);
7150 %}
7151 
7152 // Load Pointer Constant One
7153 
// Loads the pointer constant one (immP_1), not the null pointer --
// the previous format comment "# NULL ptr" was copy-pasted from loadConP0.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7165 
7166 // Load Byte Map Base Constant
7167 
7168 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7169 %{
7170   match(Set dst con);
7171 
7172   ins_cost(INSN_COST);
7173   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7174 
7175   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7176 
7177   ins_pipe(ialu_imm);
7178 %}
7179 
7180 // Load Narrow Pointer Constant
7181 
7182 instruct loadConN(iRegNNoSp dst, immN con)
7183 %{
7184   match(Set dst con);
7185 
7186   ins_cost(INSN_COST * 4);
7187   format %{ "mov  $dst, $con\t# compressed ptr" %}
7188 
7189   ins_encode(aarch64_enc_mov_n(dst, con));
7190 
7191   ins_pipe(ialu_imm);
7192 %}
7193 
7194 // Load Narrow Null Pointer Constant
7195 
7196 instruct loadConN0(iRegNNoSp dst, immN0 con)
7197 %{
7198   match(Set dst con);
7199 
7200   ins_cost(INSN_COST);
7201   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7202 
7203   ins_encode(aarch64_enc_mov_n0(dst, con));
7204 
7205   ins_pipe(ialu_imm);
7206 %}
7207 
7208 // Load Narrow Klass Constant
7209 
7210 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7211 %{
7212   match(Set dst con);
7213 
7214   ins_cost(INSN_COST);
7215   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7216 
7217   ins_encode(aarch64_enc_mov_nk(dst, con));
7218 
7219   ins_pipe(ialu_imm);
7220 %}
7221 
7222 // Load Packed Float Constant
7223 
7224 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7225   match(Set dst con);
7226   ins_cost(INSN_COST * 4);
7227   format %{ "fmovs  $dst, $con"%}
7228   ins_encode %{
7229     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7230   %}
7231 
7232   ins_pipe(fp_imm_s);
7233 %}
7234 
7235 // Load Float Constant
7236 
7237 instruct loadConF(vRegF dst, immF con) %{
7238   match(Set dst con);
7239 
7240   ins_cost(INSN_COST * 4);
7241 
7242   format %{
7243     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7244   %}
7245 
7246   ins_encode %{
7247     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7248   %}
7249 
7250   ins_pipe(fp_load_constant_s);
7251 %}
7252 
7253 // Load Packed Double Constant
7254 
7255 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7256   match(Set dst con);
7257   ins_cost(INSN_COST);
7258   format %{ "fmovd  $dst, $con"%}
7259   ins_encode %{
7260     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7261   %}
7262 
7263   ins_pipe(fp_imm_d);
7264 %}
7265 
7266 // Load Double Constant
7267 
// Load a double constant from the constant table.
// Format previously said "float=$con" (copy-paste from loadConF);
// the operand is an immD, so it is a double.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7282 
7283 // Store Instructions
7284 
7285 // Store CMS card-mark Immediate
7286 instruct storeimmCM0(immI0 zero, memory1 mem)
7287 %{
7288   match(Set mem (StoreCM mem zero));
7289 
7290   ins_cost(INSN_COST);
7291   format %{ "storestore (elided)\n\t"
7292             "strb zr, $mem\t# byte" %}
7293 
7294   ins_encode(aarch64_enc_strb0(mem));
7295 
7296   ins_pipe(istore_mem);
7297 %}
7298 
7299 // Store CMS card-mark Immediate with intervening StoreStore
7300 // needed when using CMS with no conditional card marking
7301 instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
7302 %{
7303   match(Set mem (StoreCM mem zero));
7304 
7305   ins_cost(INSN_COST * 2);
7306   format %{ "storestore\n\t"
7307             "dmb ishst"
7308             "\n\tstrb zr, $mem\t# byte" %}
7309 
7310   ins_encode(aarch64_enc_strb0_ordered(mem));
7311 
7312   ins_pipe(istore_mem);
7313 %}
7314 
7315 // Store Byte
7316 instruct storeB(iRegIorL2I src, memory1 mem)
7317 %{
7318   match(Set mem (StoreB mem src));
7319   predicate(!needs_releasing_store(n));
7320 
7321   ins_cost(INSN_COST);
7322   format %{ "strb  $src, $mem\t# byte" %}
7323 
7324   ins_encode(aarch64_enc_strb(src, mem));
7325 
7326   ins_pipe(istore_reg_mem);
7327 %}
7328 
7329 
// Store Byte Immediate Zero
// Uses the architectural zero register; no source register is consumed.
// The aarch64_enc_strb0 encoding emits "strb zr, <mem>", so the format
// text must show zr (previously it showed the non-existent "rscractch2").
instruct storeimmB0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7342 
7343 // Store Char/Short
7344 instruct storeC(iRegIorL2I src, memory2 mem)
7345 %{
7346   match(Set mem (StoreC mem src));
7347   predicate(!needs_releasing_store(n));
7348 
7349   ins_cost(INSN_COST);
7350   format %{ "strh  $src, $mem\t# short" %}
7351 
7352   ins_encode(aarch64_enc_strh(src, mem));
7353 
7354   ins_pipe(istore_reg_mem);
7355 %}
7356 
7357 instruct storeimmC0(immI0 zero, memory2 mem)
7358 %{
7359   match(Set mem (StoreC mem zero));
7360   predicate(!needs_releasing_store(n));
7361 
7362   ins_cost(INSN_COST);
7363   format %{ "strh  zr, $mem\t# short" %}
7364 
7365   ins_encode(aarch64_enc_strh0(mem));
7366 
7367   ins_pipe(istore_mem);
7368 %}
7369 
7370 // Store Integer
7371 
7372 instruct storeI(iRegIorL2I src, memory4 mem)
7373 %{
7374   match(Set mem(StoreI mem src));
7375   predicate(!needs_releasing_store(n));
7376 
7377   ins_cost(INSN_COST);
7378   format %{ "strw  $src, $mem\t# int" %}
7379 
7380   ins_encode(aarch64_enc_strw(src, mem));
7381 
7382   ins_pipe(istore_reg_mem);
7383 %}
7384 
7385 instruct storeimmI0(immI0 zero, memory4 mem)
7386 %{
7387   match(Set mem(StoreI mem zero));
7388   predicate(!needs_releasing_store(n));
7389 
7390   ins_cost(INSN_COST);
7391   format %{ "strw  zr, $mem\t# int" %}
7392 
7393   ins_encode(aarch64_enc_strw0(mem));
7394 
7395   ins_pipe(istore_mem);
7396 %}
7397 
7398 // Store Long (64 bit signed)
7399 instruct storeL(iRegL src, memory8 mem)
7400 %{
7401   match(Set mem (StoreL mem src));
7402   predicate(!needs_releasing_store(n));
7403 
7404   ins_cost(INSN_COST);
7405   format %{ "str  $src, $mem\t# int" %}
7406 
7407   ins_encode(aarch64_enc_str(src, mem));
7408 
7409   ins_pipe(istore_reg_mem);
7410 %}
7411 
7412 // Store Long (64 bit signed)
7413 instruct storeimmL0(immL0 zero, memory8 mem)
7414 %{
7415   match(Set mem (StoreL mem zero));
7416   predicate(!needs_releasing_store(n));
7417 
7418   ins_cost(INSN_COST);
7419   format %{ "str  zr, $mem\t# int" %}
7420 
7421   ins_encode(aarch64_enc_str0(mem));
7422 
7423   ins_pipe(istore_mem);
7424 %}
7425 
7426 // Store Pointer
7427 instruct storeP(iRegP src, memory8 mem)
7428 %{
7429   match(Set mem (StoreP mem src));
7430   predicate(!needs_releasing_store(n));
7431 
7432   ins_cost(INSN_COST);
7433   format %{ "str  $src, $mem\t# ptr" %}
7434 
7435   ins_encode(aarch64_enc_str(src, mem));
7436 
7437   ins_pipe(istore_reg_mem);
7438 %}
7439 
7440 // Store Pointer
7441 instruct storeimmP0(immP0 zero, memory8 mem)
7442 %{
7443   match(Set mem (StoreP mem zero));
7444   predicate(!needs_releasing_store(n));
7445 
7446   ins_cost(INSN_COST);
7447   format %{ "str zr, $mem\t# ptr" %}
7448 
7449   ins_encode(aarch64_enc_str0(mem));
7450 
7451   ins_pipe(istore_mem);
7452 %}
7453 
7454 // Store Compressed Pointer
7455 instruct storeN(iRegN src, memory4 mem)
7456 %{
7457   match(Set mem (StoreN mem src));
7458   predicate(!needs_releasing_store(n));
7459 
7460   ins_cost(INSN_COST);
7461   format %{ "strw  $src, $mem\t# compressed ptr" %}
7462 
7463   ins_encode(aarch64_enc_strw(src, mem));
7464 
7465   ins_pipe(istore_reg_mem);
7466 %}
7467 
7468 instruct storeImmN0(immN0 zero, memory4 mem)
7469 %{
7470   match(Set mem (StoreN mem zero));
7471   predicate(!needs_releasing_store(n));
7472 
7473   ins_cost(INSN_COST);
7474   format %{ "strw  zr, $mem\t# compressed ptr" %}
7475 
7476   ins_encode(aarch64_enc_strw0(mem));
7477 
7478   ins_pipe(istore_mem);
7479 %}
7480 
7481 // Store Float
7482 instruct storeF(vRegF src, memory4 mem)
7483 %{
7484   match(Set mem (StoreF mem src));
7485   predicate(!needs_releasing_store(n));
7486 
7487   ins_cost(INSN_COST);
7488   format %{ "strs  $src, $mem\t# float" %}
7489 
7490   ins_encode( aarch64_enc_strs(src, mem) );
7491 
7492   ins_pipe(pipe_class_memory);
7493 %}
7494 
7495 // TODO
7496 // implement storeImmF0 and storeFImmPacked
7497 
7498 // Store Double
7499 instruct storeD(vRegD src, memory8 mem)
7500 %{
7501   match(Set mem (StoreD mem src));
7502   predicate(!needs_releasing_store(n));
7503 
7504   ins_cost(INSN_COST);
7505   format %{ "strd  $src, $mem\t# double" %}
7506 
7507   ins_encode( aarch64_enc_strd(src, mem) );
7508 
7509   ins_pipe(pipe_class_memory);
7510 %}
7511 
7512 // Store Compressed Klass Pointer
7513 instruct storeNKlass(iRegN src, memory4 mem)
7514 %{
7515   predicate(!needs_releasing_store(n));
7516   match(Set mem (StoreNKlass mem src));
7517 
7518   ins_cost(INSN_COST);
7519   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7520 
7521   ins_encode(aarch64_enc_strw(src, mem));
7522 
7523   ins_pipe(istore_reg_mem);
7524 %}
7525 
7526 // TODO
7527 // implement storeImmD0 and storeDImmPacked
7528 
7529 // prefetch instructions
7530 // Must be safe to execute with invalid address (cannot fault).
7531 
7532 instruct prefetchalloc( memory8 mem ) %{
7533   match(PrefetchAllocation mem);
7534 
7535   ins_cost(INSN_COST);
7536   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7537 
7538   ins_encode( aarch64_enc_prefetchw(mem) );
7539 
7540   ins_pipe(iload_prefetch);
7541 %}
7542 
7543 //  ---------------- volatile loads and stores ----------------
7544 
7545 // Load Byte (8 bit signed)
7546 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7547 %{
7548   match(Set dst (LoadB mem));
7549 
7550   ins_cost(VOLATILE_REF_COST);
7551   format %{ "ldarsb  $dst, $mem\t# byte" %}
7552 
7553   ins_encode(aarch64_enc_ldarsb(dst, mem));
7554 
7555   ins_pipe(pipe_serial);
7556 %}
7557 
7558 // Load Byte (8 bit signed) into long
7559 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7560 %{
7561   match(Set dst (ConvI2L (LoadB mem)));
7562 
7563   ins_cost(VOLATILE_REF_COST);
7564   format %{ "ldarsb  $dst, $mem\t# byte" %}
7565 
7566   ins_encode(aarch64_enc_ldarsb(dst, mem));
7567 
7568   ins_pipe(pipe_serial);
7569 %}
7570 
7571 // Load Byte (8 bit unsigned)
7572 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7573 %{
7574   match(Set dst (LoadUB mem));
7575 
7576   ins_cost(VOLATILE_REF_COST);
7577   format %{ "ldarb  $dst, $mem\t# byte" %}
7578 
7579   ins_encode(aarch64_enc_ldarb(dst, mem));
7580 
7581   ins_pipe(pipe_serial);
7582 %}
7583 
7584 // Load Byte (8 bit unsigned) into long
7585 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7586 %{
7587   match(Set dst (ConvI2L (LoadUB mem)));
7588 
7589   ins_cost(VOLATILE_REF_COST);
7590   format %{ "ldarb  $dst, $mem\t# byte" %}
7591 
7592   ins_encode(aarch64_enc_ldarb(dst, mem));
7593 
7594   ins_pipe(pipe_serial);
7595 %}
7596 
7597 // Load Short (16 bit signed)
7598 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7599 %{
7600   match(Set dst (LoadS mem));
7601 
7602   ins_cost(VOLATILE_REF_COST);
7603   format %{ "ldarshw  $dst, $mem\t# short" %}
7604 
7605   ins_encode(aarch64_enc_ldarshw(dst, mem));
7606 
7607   ins_pipe(pipe_serial);
7608 %}
7609 
7610 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7611 %{
7612   match(Set dst (LoadUS mem));
7613 
7614   ins_cost(VOLATILE_REF_COST);
7615   format %{ "ldarhw  $dst, $mem\t# short" %}
7616 
7617   ins_encode(aarch64_enc_ldarhw(dst, mem));
7618 
7619   ins_pipe(pipe_serial);
7620 %}
7621 
7622 // Load Short/Char (16 bit unsigned) into long
7623 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7624 %{
7625   match(Set dst (ConvI2L (LoadUS mem)));
7626 
7627   ins_cost(VOLATILE_REF_COST);
7628   format %{ "ldarh  $dst, $mem\t# short" %}
7629 
7630   ins_encode(aarch64_enc_ldarh(dst, mem));
7631 
7632   ins_pipe(pipe_serial);
7633 %}
7634 
// Load Short (16 bit signed) into long
// Volatile load of a signed short, sign-extended into a long.
// The encoding is aarch64_enc_ldarsh (load-acquire signed halfword), so the
// format text must show "ldarsh"; it previously showed the unsigned "ldarh".
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7647 
7648 // Load Integer (32 bit signed)
7649 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7650 %{
7651   match(Set dst (LoadI mem));
7652 
7653   ins_cost(VOLATILE_REF_COST);
7654   format %{ "ldarw  $dst, $mem\t# int" %}
7655 
7656   ins_encode(aarch64_enc_ldarw(dst, mem));
7657 
7658   ins_pipe(pipe_serial);
7659 %}
7660 
7661 // Load Integer (32 bit unsigned) into long
7662 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7663 %{
7664   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7665 
7666   ins_cost(VOLATILE_REF_COST);
7667   format %{ "ldarw  $dst, $mem\t# int" %}
7668 
7669   ins_encode(aarch64_enc_ldarw(dst, mem));
7670 
7671   ins_pipe(pipe_serial);
7672 %}
7673 
7674 // Load Long (64 bit signed)
7675 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7676 %{
7677   match(Set dst (LoadL mem));
7678 
7679   ins_cost(VOLATILE_REF_COST);
7680   format %{ "ldar  $dst, $mem\t# int" %}
7681 
7682   ins_encode(aarch64_enc_ldar(dst, mem));
7683 
7684   ins_pipe(pipe_serial);
7685 %}
7686 
7687 // Load Pointer
7688 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7689 %{
7690   match(Set dst (LoadP mem));
7691   predicate(n->as_Load()->barrier_data() == 0);
7692 
7693   ins_cost(VOLATILE_REF_COST);
7694   format %{ "ldar  $dst, $mem\t# ptr" %}
7695 
7696   ins_encode(aarch64_enc_ldar(dst, mem));
7697 
7698   ins_pipe(pipe_serial);
7699 %}
7700 
7701 // Load Compressed Pointer
7702 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7703 %{
7704   match(Set dst (LoadN mem));
7705 
7706   ins_cost(VOLATILE_REF_COST);
7707   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7708 
7709   ins_encode(aarch64_enc_ldarw(dst, mem));
7710 
7711   ins_pipe(pipe_serial);
7712 %}
7713 
7714 // Load Float
7715 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7716 %{
7717   match(Set dst (LoadF mem));
7718 
7719   ins_cost(VOLATILE_REF_COST);
7720   format %{ "ldars  $dst, $mem\t# float" %}
7721 
7722   ins_encode( aarch64_enc_fldars(dst, mem) );
7723 
7724   ins_pipe(pipe_serial);
7725 %}
7726 
7727 // Load Double
7728 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7729 %{
7730   match(Set dst (LoadD mem));
7731 
7732   ins_cost(VOLATILE_REF_COST);
7733   format %{ "ldard  $dst, $mem\t# double" %}
7734 
7735   ins_encode( aarch64_enc_fldard(dst, mem) );
7736 
7737   ins_pipe(pipe_serial);
7738 %}
7739 
7740 // Store Byte
7741 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7742 %{
7743   match(Set mem (StoreB mem src));
7744 
7745   ins_cost(VOLATILE_REF_COST);
7746   format %{ "stlrb  $src, $mem\t# byte" %}
7747 
7748   ins_encode(aarch64_enc_stlrb(src, mem));
7749 
7750   ins_pipe(pipe_class_memory);
7751 %}
7752 
7753 // Store Char/Short
7754 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7755 %{
7756   match(Set mem (StoreC mem src));
7757 
7758   ins_cost(VOLATILE_REF_COST);
7759   format %{ "stlrh  $src, $mem\t# short" %}
7760 
7761   ins_encode(aarch64_enc_stlrh(src, mem));
7762 
7763   ins_pipe(pipe_class_memory);
7764 %}
7765 
7766 // Store Integer
7767 
7768 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7769 %{
7770   match(Set mem(StoreI mem src));
7771 
7772   ins_cost(VOLATILE_REF_COST);
7773   format %{ "stlrw  $src, $mem\t# int" %}
7774 
7775   ins_encode(aarch64_enc_stlrw(src, mem));
7776 
7777   ins_pipe(pipe_class_memory);
7778 %}
7779 
7780 // Store Long (64 bit signed)
7781 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
7782 %{
7783   match(Set mem (StoreL mem src));
7784 
7785   ins_cost(VOLATILE_REF_COST);
7786   format %{ "stlr  $src, $mem\t# int" %}
7787 
7788   ins_encode(aarch64_enc_stlr(src, mem));
7789 
7790   ins_pipe(pipe_class_memory);
7791 %}
7792 
7793 // Store Pointer
7794 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7795 %{
7796   match(Set mem (StoreP mem src));
7797 
7798   ins_cost(VOLATILE_REF_COST);
7799   format %{ "stlr  $src, $mem\t# ptr" %}
7800 
7801   ins_encode(aarch64_enc_stlr(src, mem));
7802 
7803   ins_pipe(pipe_class_memory);
7804 %}
7805 
7806 // Store Compressed Pointer
7807 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7808 %{
7809   match(Set mem (StoreN mem src));
7810 
7811   ins_cost(VOLATILE_REF_COST);
7812   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7813 
7814   ins_encode(aarch64_enc_stlrw(src, mem));
7815 
7816   ins_pipe(pipe_class_memory);
7817 %}
7818 
7819 // Store Float
7820 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7821 %{
7822   match(Set mem (StoreF mem src));
7823 
7824   ins_cost(VOLATILE_REF_COST);
7825   format %{ "stlrs  $src, $mem\t# float" %}
7826 
7827   ins_encode( aarch64_enc_fstlrs(src, mem) );
7828 
7829   ins_pipe(pipe_class_memory);
7830 %}
7831 
7832 // TODO
7833 // implement storeImmF0 and storeFImmPacked
7834 
7835 // Store Double
7836 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7837 %{
7838   match(Set mem (StoreD mem src));
7839 
7840   ins_cost(VOLATILE_REF_COST);
7841   format %{ "stlrd  $src, $mem\t# double" %}
7842 
7843   ins_encode( aarch64_enc_fstlrd(src, mem) );
7844 
7845   ins_pipe(pipe_class_memory);
7846 %}
7847 
7848 //  ---------------- end of volatile loads and stores ----------------
7849 
7850 instruct cacheWB(indirect addr)
7851 %{
7852   predicate(VM_Version::supports_data_cache_line_flush());
7853   match(CacheWB addr);
7854 
7855   ins_cost(100);
7856   format %{"cache wb $addr" %}
7857   ins_encode %{
7858     assert($addr->index_position() < 0, "should be");
7859     assert($addr$$disp == 0, "should be");
7860     __ cache_wb(Address($addr$$base$$Register, 0));
7861   %}
7862   ins_pipe(pipe_slow); // XXX
7863 %}
7864 
7865 instruct cacheWBPreSync()
7866 %{
7867   predicate(VM_Version::supports_data_cache_line_flush());
7868   match(CacheWBPreSync);
7869 
7870   ins_cost(100);
7871   format %{"cache wb presync" %}
7872   ins_encode %{
7873     __ cache_wbsync(true);
7874   %}
7875   ins_pipe(pipe_slow); // XXX
7876 %}
7877 
7878 instruct cacheWBPostSync()
7879 %{
7880   predicate(VM_Version::supports_data_cache_line_flush());
7881   match(CacheWBPostSync);
7882 
7883   ins_cost(100);
7884   format %{"cache wb postsync" %}
7885   ins_encode %{
7886     __ cache_wbsync(false);
7887   %}
7888   ins_pipe(pipe_slow); // XXX
7889 %}
7890 
7891 // ============================================================================
7892 // BSWAP Instructions
7893 
7894 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7895   match(Set dst (ReverseBytesI src));
7896 
7897   ins_cost(INSN_COST);
7898   format %{ "revw  $dst, $src" %}
7899 
7900   ins_encode %{
7901     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7902   %}
7903 
7904   ins_pipe(ialu_reg);
7905 %}
7906 
7907 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7908   match(Set dst (ReverseBytesL src));
7909 
7910   ins_cost(INSN_COST);
7911   format %{ "rev  $dst, $src" %}
7912 
7913   ins_encode %{
7914     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7915   %}
7916 
7917   ins_pipe(ialu_reg);
7918 %}
7919 
7920 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7921   match(Set dst (ReverseBytesUS src));
7922 
7923   ins_cost(INSN_COST);
7924   format %{ "rev16w  $dst, $src" %}
7925 
7926   ins_encode %{
7927     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7928   %}
7929 
7930   ins_pipe(ialu_reg);
7931 %}
7932 
7933 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7934   match(Set dst (ReverseBytesS src));
7935 
7936   ins_cost(INSN_COST);
7937   format %{ "rev16w  $dst, $src\n\t"
7938             "sbfmw $dst, $dst, #0, #15" %}
7939 
7940   ins_encode %{
7941     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7942     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7943   %}
7944 
7945   ins_pipe(ialu_reg);
7946 %}
7947 
7948 // ============================================================================
7949 // Zero Count Instructions
7950 
// Count leading zeros of a 32-bit int with CLZW.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit long with CLZ; result is an int.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of an int: AArch64 has no CTZ, so reverse the
// bits (RBITW) and count leading zeros of the result.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a long via RBIT + CLZ (see int variant above).
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8002 
8003 //---------- Population Count Instructions -------------------------------------
8004 //
8005 
// Population count of an int. AArch64 has no scalar popcount, so the
// value is moved into a SIMD register, CNT counts bits per byte, ADDV
// sums the byte counts, and the result is moved back to a GPR.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw src,src zero-extends: the 1D vector move below copies the
    // full 64-bit register, so the top 32 bits must be cleared first.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of an int loaded from memory: load 32 bits
// directly into the SIMD register (LDRS), avoiding the GPR round trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// Population count of a long: same SIMD CNT/ADDV sequence as the int
// form, but no zero-extension is needed for a full 64-bit source.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of a long loaded from memory (LDRD straight into
// the SIMD register).
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8092 
8093 // ============================================================================
8094 // MemBar Instruction
8095 
// LoadFence: orders prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire elided: the predicate proves the acquire ordering is
// already provided (e.g. by a preceding load-acquire), so only a
// block comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire: emitted via MacroAssembler::membar with
// LoadLoad|LoadStore ordering.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock: always elided — lock acquisition on AArch64 is
// implemented with instructions that already provide acquire semantics.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease elided when the predicate proves release ordering is
// already provided (e.g. by a following store-release).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease: emitted via MacroAssembler::membar with
// LoadStore|StoreStore ordering.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: orders prior stores before subsequent stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided — lock release uses instructions
// that already provide release semantics.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile elided when the predicate proves the full fence is
// redundant with surrounding ordered accesses.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile: full StoreLoad fence. Costed very high to steer the
// matcher towards the elided/ordered-access forms where possible.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8243 
8244 // ============================================================================
8245 // Cast/Convert Instructions
8246 
// CastX2P: reinterpret an int64_t as a pointer. Pure register move,
// elided entirely when the allocator picked the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int64_t -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// CastP2X: reinterpret a pointer as an int64_t. Mirror of castX2P.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> int64_t" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// (ConvL2I (CastP2X src)): MOVW truncates the pointer to 32 bits.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8289 
8290 // Convert compressed oop into int for vectors alignment masking
8291 // in case of 32bit oops (heap < 4Gb).
// Truncate a compressed oop to an int. Only valid when the compressed
// oop shift is 0 (heap < 4Gb), in which case the narrow oop bits are
// the low 32 bits of the address and a plain MOVW suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format: was "mov dst, ..." — missing the '$' so the debug
  // output printed the literal text "dst", and the mnemonic did not
  // match the emitted MOVW.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8305 
8306 
8307 // Convert oop pointer into compressed form
// EncodeP (maybe-null oop -> narrow oop). encode_heap_oop handles the
// null check internally; flags are killed by that sequence.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// EncodeP when the type system proves the oop is not null — the
// cheaper not-null encoding is used and no KILL cr effect is declared.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// DecodeN (narrow oop -> oop) for possibly-null, non-constant oops.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// DecodeN when the oop is known not-null (or a constant) — uses the
// cheaper not-null decoding.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8360 
8361 // n.b. AArch64 implementations of encode_klass_not_null and
8362 // decode_klass_not_null do not modify the flags register so, unlike
8363 // Intel, we don't kill CR as a side effect here
8364 
// EncodePKlass: compress a klass pointer. Per the note above, the
// AArch64 implementation does not touch flags, so no KILL cr effect.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// DecodeNKlass: decompress a narrow klass pointer. The in-place
// single-register overload is used when dst and src were allocated to
// the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8398 
// CheckCastPP is a type-system-only node: no code is emitted
// (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP: likewise a compile-time-only cast, zero bytes emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: compile-time-only int cast, zero bytes and zero cost.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8429 
8430 // ============================================================================
8431 // Atomic operation instructions
8432 //
8433 // Intel and SPARC both implement Ideal Node LoadPLocked and
8434 // Store{PIL}Conditional instructions using a normal load for the
8435 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8436 //
8437 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8438 // pair to lock object allocations from Eden space when not using
8439 // TLABs.
8440 //
8441 // There does not appear to be a Load{IL}Locked Ideal Node and the
8442 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8443 // and to use StoreIConditional only for 32-bit and StoreLConditional
8444 // only for 64-bit.
8445 //
8446 // We implement LoadPLocked and StorePLocked instructions using,
8447 // respectively the AArch64 hw load-exclusive and store-conditional
8448 // instructions. Whereas we must implement each of
8449 // Store{IL}Conditional using a CAS which employs a pair of
8450 // instructions comprising a load-exclusive followed by a
8451 // store-conditional.
8452 
8453 
8454 // Locked-load (linked load) of the current heap-top
8455 // used when updating the eden heap top
8456 // implemented using ldaxr on AArch64
8457 
// LoadPLocked: linked (exclusive) load of the heap top, implemented
// with LDAXR so the subsequent conditional store can pair with it.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}

// StorePConditional: conditional store of the updated heap top,
// implemented with STLXR. EQ flag is set on success (stlxr status
// register compared against zero by the encoding).
// NOTE(review): 'oldval' is carried by the match rule but not used by
// the encoding — the pairing with the earlier LDAXR provides the
// compare-free conditional semantics.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8495 
8496 
8497 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8498 // when attempting to rebias a lock towards the current thread.  We
8499 // must use the acquire form of cmpxchg in order to guarantee acquire
8500 // semantics in this case.
// StoreLConditional: 64-bit conditional store implemented as an
// acquiring CAS (see the comment above — acquire semantics are
// required by the lock-rebias path that uses this node).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// StoreIConditional: 32-bit variant; mirrors storeLConditional with
// an acquiring word-sized CAS.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8535 
8536 // standard CompareAndSwapX when we are using barriers
8537 // these have higher priority than the rules selected by a predicate
8538 
8539 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8540 // can't match them
8541 
// CompareAndSwapB: byte-sized CAS; res <- 1 on success, 0 on failure
// (CSET on the EQ flag produced by the cmpxchg encoding).
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapS: halfword-sized CAS; res is the success flag.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapI: 32-bit CAS; res is the success flag.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapL: 64-bit CAS; res is the success flag.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapP: pointer CAS. Only matches when no GC barrier data
// is attached (barrier-aware GCs supply their own rules).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapN: narrow-oop (32-bit) CAS; res is the success flag.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8650 
8651 // alternative CompareAndSwapX when we are eliding barriers
8652 
// Acquiring byte CAS: selected (at lower cost) when the predicate
// proves an acquiring load-exclusive is needed, eliding the separate
// barrier the plain form would require.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring halfword CAS (see compareAndSwapBAcq).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring 32-bit CAS (see compareAndSwapBAcq).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring 64-bit CAS (see compareAndSwapBAcq).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring pointer CAS; additionally requires no GC barrier data,
// matching the plain compareAndSwapP rule.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS (see compareAndSwapBAcq).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8766 
8767 
8768 // ---------------------------------------------------------------------
8769 
8770 
8771 // BEGIN This section of the file is automatically generated. Do not edit --------------
8772 
8773 // Sundry CAS operations.  Note that release is always true,
8774 // regardless of the memory ordering of the CAS.  This is because we
8775 // need the volatile case to be sequentially consistent but there is
8776 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8777 // can't check the type of memory ordering here, so we always emit a
8778 // STLXR.
8779 
8780 // This section is generated from aarch64_ad_cas.m4
8781 
8782 
8783 
// CompareAndExchange rules: res receives the value previously held in
// memory (not a success flag). All variants pass /*weak*/ false, so
// the former "(…, weak)" tags in the format strings were wrong — they
// were removed below. NOTE(review): this section is generated from
// aarch64_ad_cas.m4; mirror this format-string fix there.

// Byte compare-and-exchange; the loaded byte is sign-extended to int
// to match Java byte semantics.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Short compare-and-exchange; result sign-extended from 16 bits.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Int compare-and-exchange.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Long compare-and-exchange.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Narrow-oop compare-and-exchange (32-bit word).
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer compare-and-exchange; only when no GC barrier data applies.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8876 
8877 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8878   predicate(needs_acquiring_load_exclusive(n));
8879   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
8880   ins_cost(VOLATILE_REF_COST);
8881   effect(TEMP_DEF res, KILL cr);
8882   format %{
8883     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8884   %}
8885   ins_encode %{
8886     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8887                Assembler::byte, /*acquire*/ true, /*release*/ true,
8888                /*weak*/ false, $res$$Register);
8889     __ sxtbw($res$$Register, $res$$Register);
8890   %}
8891   ins_pipe(pipe_slow);
8892 %}
8893 
// Strong CAS on a short field with acquire+release ordering; $res
// receives the previous value.
// Fix: format comment wrongly described this strong CAS as "weak".
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the zero-extended halfword result to match Java
    // short semantics.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8910 
8911 
// Strong CAS on an int field with acquire+release ordering; $res
// receives the previous value.
// Fix: format comment wrongly described this strong CAS as "weak".
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8927 
// Strong CAS on a long field with acquire+release ordering; $res
// receives the previous value.
// Fix: format comment wrongly described this strong CAS as "weak".
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8943 
8944 
// Strong CAS on a narrow-oop field with acquire+release ordering; $res
// receives the previous value.
// Fix: format comment wrongly described this strong CAS as "weak".
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8960 
// Strong CAS on a pointer field with acquire+release ordering; $res
// receives the previous value. Only matched when the GC left no
// barrier data on the node.
// Fix: format comment wrongly described this strong CAS as "weak".
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8976 
// Weak CAS on a byte field: a single exchange attempt that may fail
// spuriously. $res <-- 1 if the store happened (EQ), else 0.
// Release-only ordering.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8993 
// Weak CAS on a short field; $res <-- 1 on success, 0 on (possibly
// spurious) failure. Release-only ordering.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9010 
// Weak CAS on an int field; $res <-- 1 on success, 0 on (possibly
// spurious) failure. Release-only ordering.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9027 
// Weak CAS on a long field; $res <-- 1 on success, 0 on (possibly
// spurious) failure. Release-only ordering.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9044 
// Weak CAS on a narrow-oop field; $res <-- 1 on success, 0 on
// (possibly spurious) failure. Release-only ordering.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9061 
// Weak CAS on a pointer field; $res <-- 1 on success, 0 on (possibly
// spurious) failure. Release-only ordering. Only matched when the GC
// left no barrier data on the node.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9079 
// Weak CAS on a byte field with acquire+release ordering; selected
// when the node needs an acquiring load exclusive.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9097 
// Weak CAS on a short field with acquire+release ordering.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9115 
// Weak CAS on an int field with acquire+release ordering.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9133 
// Weak CAS on a long field with acquire+release ordering.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9151 
// Weak CAS on a narrow-oop field with acquire+release ordering.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9169 
// Weak CAS on a pointer field with acquire+release ordering. Only
// matched when the GC left no barrier data on the node.
// Fix: predicate now precedes match, consistent with every sibling rule
// in this section (ordering was inverted only here).
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9187 
9188 // END This section of the file is automatically generated. Do not edit --------------
9189 // ---------------------------------------------------------------------
9190 
// Atomic exchange of an int: $prev <-- *[$mem]; *[$mem] <-- $newv.
// Release-only ordering.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9200 
// Atomic exchange of a long: $prev <-- *[$mem]; *[$mem] <-- $newv.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9210 
// Atomic exchange of a narrow oop (32-bit word form).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9220 
// Atomic exchange of a pointer. Only matched when the GC left no
// barrier data on the node.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9231 
// Atomic exchange of an int with acquire semantics (atomic_xchgalw);
// selected when the node needs an acquiring load exclusive.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9242 
// Atomic exchange of a long with acquire semantics (atomic_xchgal).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9253 
// Atomic exchange of a narrow oop with acquire semantics.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9264 
// Atomic exchange of a pointer with acquire semantics. Only matched
// when the GC left no barrier data on the node.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9275 
9276 
// Atomic fetch-and-add of a long by a register increment; $newval
// receives the value previously in memory.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9286 
// Atomic add of a long where the fetched value is unused (noreg result);
// preferred (lower cost) when the node's result is dead.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9297 
// Atomic fetch-and-add of a long by an immediate (add/sub-encodable)
// increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9307 
// Atomic add of a long by an immediate, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9318 
// Atomic fetch-and-add of an int by a register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9328 
// Atomic add of an int, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9339 
// Atomic fetch-and-add of an int by an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9349 
// Atomic add of an int by an immediate, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9360 
// Atomic fetch-and-add of a long with acquire semantics (atomic_addal).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9371 
// Atomic add of a long with acquire semantics, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9382 
// Atomic fetch-and-add of a long by an immediate, acquire semantics.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9393 
// Atomic add of a long by an immediate, acquire semantics, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9404 
// Atomic fetch-and-add of an int with acquire semantics (atomic_addalw).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9415 
// Atomic add of an int with acquire semantics, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9426 
// Atomic fetch-and-add of an int by an immediate, acquire semantics.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9437 
// Atomic add of an int by an immediate, acquire semantics, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9448 
9449 // Manifest a CmpL result in an integer register.
9450 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Fix: removed a stale commented-out alternative format line.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    // dst <- (src1 != src2) ? 1 : 0, then negated when src1 < src2,
    // giving -1/0/+1.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9471 
// Manifest a CmpL result against an add/sub-encodable immediate:
// $dst <- (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0).
// Fix: stray indentation on the `if` line.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // The immediate field of subs is unsigned, so a negative constant
    // is compared by adding its negation instead.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9496 
9497 // ============================================================================
9498 // Conditional Move Instructions
9499 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9509 
// Conditional move, int, signed compare:
// $dst <- $cmp ? $src2 : $src1 (note the swapped csel operand order).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9525 
// Conditional move, int, unsigned compare flavour of the rule above.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9541 
9542 // special cases where one arg is zero
9543 
9544 // n.b. this is selected in preference to the rule above because it
9545 // avoids loading constant 0 into a source register
9546 
9547 // TODO
9548 // we ought only to be able to cull one of these variants as the ideal
9549 // transforms ought always to order the zero consistently (to left/right?)
9550 
// Conditional move, int, signed compare, first source is the constant
// zero: $dst <- $cmp ? $src : 0 (zr supplies the zero, saving a load).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9566 
// Unsigned-compare flavour of the zero/reg int conditional move.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9582 
// Conditional move, int, signed compare, second source is the constant
// zero: $dst <- $cmp ? 0 : $src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9598 
// Unsigned-compare flavour of the reg/zero int conditional move.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9614 
9615 // special case for creating a boolean 0 or 1
9616 
9617 // n.b. this is selected in preference to the rule above because it
9618 // avoids loading constants 0 and 1 into a source register
9619 
// Boolean materialization, signed compare: csincw $dst, zr, zr, cond
// yields $dst <- cond ? 0 : 1 — i.e. cset with the negated condition,
// without loading either constant into a register.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9638 
// Unsigned-compare flavour of the boolean materialization above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9657 
// Conditional move, long, signed compare:
// $dst <- $cmp ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, int64_t"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9673 
// Unsigned-compare flavour of the long conditional move.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, int64_t"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9689 
9690 // special cases where one arg is zero
9691 
// Conditional move, long, signed compare, second source is zero:
// $dst <- $cmp ? 0 : $src (zr supplies the zero).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, int64_t"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9707 
// Unsigned-compare variant of cmovL_reg_zero:
// emits "csel dst, zr, src, cond", i.e. dst = cond ? 0 : src.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, int64_t"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9723 
// Long conditional move where the selected-on-false value is the constant 0:
// emits "csel dst, src, zr, cond", i.e. dst = cond ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, int64_t"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9739 
// Unsigned-compare variant of cmovL_zero_reg:
// emits "csel dst, src, zr, cond", i.e. dst = cond ? src : 0.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, int64_t"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9755 
// Pointer conditional move, signed compare: emits
// "csel dst, src2, src1, cond", i.e. dst = cond ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9771 
// Pointer conditional move, unsigned compare: emits
// "csel dst, src2, src1, cond", i.e. dst = cond ? src2 : src1.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9787 
9788 // special cases where one arg is zero
9789 
// Pointer conditional move against null (selected on true):
// emits "csel dst, zr, src, cond", i.e. dst = cond ? 0 : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9805 
// Unsigned-compare variant of cmovP_reg_zero:
// emits "csel dst, zr, src, cond", i.e. dst = cond ? 0 : src.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9821 
// Pointer conditional move against null (selected on false):
// emits "csel dst, src, zr, cond", i.e. dst = cond ? src : 0.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9837 
// Unsigned-compare variant of cmovP_zero_reg:
// emits "csel dst, src, zr, cond", i.e. dst = cond ? src : 0.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9853 
// Compressed-pointer (narrow oop) conditional move, signed compare; uses the
// 32-bit cselw form: dst = cond ? src2 : src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9869 
// Compressed-pointer (narrow oop) conditional move, unsigned compare; uses
// the 32-bit cselw form: dst = cond ? src2 : src1.
// Fix: the format comment previously said "signed" although this is the
// cmpOpU/rFlagsRegU (unsigned) rule — now consistent with the other
// unsigned cmovUN variants below.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9885 
9886 // special cases where one arg is zero
9887 
// Narrow-oop conditional move against null (selected on true):
// emits "cselw dst, zr, src, cond", i.e. dst = cond ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9903 
// Unsigned-compare variant of cmovN_reg_zero:
// emits "cselw dst, zr, src, cond", i.e. dst = cond ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9919 
// Narrow-oop conditional move against null (selected on false):
// emits "cselw dst, src, zr, cond", i.e. dst = cond ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9935 
// Unsigned-compare variant of cmovN_zero_reg:
// emits "cselw dst, src, zr, cond", i.e. dst = cond ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9951 
// Float conditional move, signed compare: emits the single-precision
// "fcsels dst, src2, src1, cond", i.e. dst = cond ? src2 : src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9969 
// Float conditional move, unsigned compare: emits the single-precision
// "fcsels dst, src2, src1, cond", i.e. dst = cond ? src2 : src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9987 
// Double conditional move, signed compare: emits the double-precision
// "fcseld dst, src2, src1, cond", i.e. dst = cond ? src2 : src1.
// Fix: the format comment previously said "cmove float" although this rule
// matches CMoveD and emits fcseld (double).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10005 
// Double conditional move, unsigned compare: emits the double-precision
// "fcseld dst, src2, src1, cond", i.e. dst = cond ? src2 : src1.
// Fix: the format comment previously said "cmove float" although this rule
// matches CMoveD and emits fcseld (double).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10023 
10024 // ============================================================================
10025 // Arithmetic Instructions
10026 //
10027 
10028 // Integer Addition
10029 
10030 // TODO
10031 // these currently employ operations which do not set CR and hence are
10032 // not flagged as killing CR but we would like to isolate the cases
10033 // where we want to set flags from those where we don't. need to work
10034 // out how to do that.
10035 
// 32-bit integer add, register + register: emits "addw dst, src1, src2".
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10050 
// 32-bit integer add, register + add/sub-encodable immediate; shares the
// aarch64_enc_addsubw_imm encoder with subI_reg_imm, selected by opcode.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10064 
// 32-bit add of an immediate to the low word of a long (AddI of ConvL2I):
// the truncation is free since addw only reads the low 32 bits of src1.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10078 
10079 // Pointer Addition
// Pointer add, register + 64-bit register offset: emits "add dst, src1, src2".
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10094 
// Pointer add with a sign-extended 32-bit offset: folds the ConvI2L into the
// add's sxtw extended-register form, avoiding a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10109 
// Pointer add with a shifted 64-bit index: folds "base + (index << scale)"
// into one address-generation (lea with an lsl-scaled register offset).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10124 
// Pointer add with a sign-extended, shifted 32-bit index: folds
// "base + ((long)index << scale)" into one lea using the sxtw-scaled
// register-offset addressing mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10139 
// (ConvI2L src) << scale folded into a single sbfiz: insert the (at most 32
// significant) bits of src at bit position (scale & 63).
// NOTE(review): cr appears in the signature but the encoding does not touch
// flags — presumably a historical effect declaration; confirm before removing.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // width = min(32, 64 - shift): only the low 32 bits of src are
    // significant after ConvI2L, and the field must fit below bit 64.
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10154 
10155 // Pointer Immediate Addition
10156 // n.b. this needs to be more expensive than using an indirect memory
10157 // operand
// Pointer add of an add/sub-encodable immediate; shares the
// aarch64_enc_addsub_imm encoder with subL_reg_imm, selected by opcode.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10171 
10172 // Long Addition
// 64-bit integer add, register + register: emits "add dst, src1, src2".
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10188 
// Long Immediate Addition. No constant pool entries required.
// 64-bit integer add of an add/sub-encodable immediate (no constant pool
// entry needed); shares the aarch64_enc_addsub_imm encoder, opcode 0x0 = add.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10203 
10204 // Integer Subtraction
// 32-bit integer subtract, register - register: emits "subw dst, src1, src2".
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10219 
10220 // Immediate Subtraction
// 32-bit integer subtract of an add/sub-encodable immediate; shares the
// aarch64_enc_addsubw_imm encoder, opcode 0x1 = sub.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10234 
10235 // Long Subtraction
// 64-bit integer subtract, register - register: emits "sub dst, src1, src2".
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10251 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit integer subtract of an add/sub-encodable immediate (no constant
// pool entry needed); shares the aarch64_enc_addsub_imm encoder, opcode
// 0x1 = sub.
// Fix: format string was missing the space after the mnemonic ("sub$dst").
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10266 
10267 // Integer Negation (special case for sub)
10268 
// Integer negation, matched as (0 - src): emits "negw dst, src".
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10282 
10283 // Long Negation
10284 
// Long negation, matched as (0 - src): emits "neg dst, src".
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# int64_t" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10298 
10299 // Integer Multiply
10300 
// 32-bit integer multiply: emits "mulw dst, src1, src2".
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10315 
// Signed widening 32x32->64 multiply: MulL of two sign-extended ints is
// folded into a single "smull dst, src1, src2".
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10330 
10331 // Long Multiply
10332 
// 64-bit integer multiply: emits "mul dst, src1, src2".
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10347 
// High 64 bits of a signed 64x64->128 multiply: emits "smulh dst, src1, src2".
// Fix: removed the stray trailing comma in the format string
// ("…$src2, \t# mulhi" -> "…$src2\t# mulhi").
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10363 
10364 // Combined Integer Multiply & Add/Sub
10365 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2, emitted as maddw.
// Fix: format previously printed "madd" although the encoder emits the
// 32-bit maddw form.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10381 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2, emitted as msubw.
// Fix: format previously printed "msub" although the encoder emits the
// 32-bit msubw form.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10397 
10398 // Combined Integer Multiply & Neg
10399 
// Fused 32-bit multiply-negate: dst = -(src1 * src2), matched from either
// operand being negated, emitted as mnegw.
// Fix: format previously printed "mneg" although the encoder emits the
// 32-bit mnegw form.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10415 
10416 // Combined Long Multiply & Add/Sub
10417 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2, emitted as madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10433 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2, emitted as msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10449 
10450 // Combined Long Multiply & Neg
10451 
// Fused 64-bit multiply-negate: dst = -(src1 * src2), matched from either
// operand being negated, emitted as mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10467 
10468 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10469 
// Signed widening multiply-add: dst = src3 + (long)src1 * (long)src2,
// folded into a single smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10485 
// Signed widening multiply-subtract: dst = src3 - (long)src1 * (long)src2,
// folded into a single smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10501 
// Signed widening multiply-negate: dst = -((long)src1 * (long)src2), matched
// from either widened operand being negated, folded into a single smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10517 
10518 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10519 
// MulAddS2I: dst = src1 * src2 + src3 * src4, emitted as a mulw into
// rscratch1 followed by a maddw that folds in the second product.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10533 
10534 // Integer Divide
10535 
// 32-bit signed integer divide via the shared aarch64_enc_divw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10545 
10546 // Long Divide
10547 
// 64-bit signed integer divide via the shared aarch64_enc_div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10557 
10558 // Integer Remainder
10559 
// 32-bit signed remainder via the shared aarch64_enc_modw encoder:
// sdivw then msubw to recover src1 - (src1 / src2) * src2.
// Fix: the format string was malformed ("msubw($dst, …" with a stray,
// unmatched parenthesis) — now matches assembler syntax.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10570 
10571 // Long Remainder
10572 
// 64-bit signed remainder via the shared aarch64_enc_mod encoder:
// sdiv then msub to recover src1 - (src1 / src2) * src2.
// Fix: the format string was malformed ("msub($dst, …" with a stray,
// unmatched parenthesis) — now matches assembler syntax.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10583 
10584 // Integer Shifts
10585 
10586 // Shift Left Register
// 32-bit shift left by a register amount: emits "lslvw dst, src1, src2".
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10601 
10602 // Shift Left Immediate
// 32-bit shift left by an immediate; the count is masked to 0..31 as
// Java shift semantics require.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10617 
10618 // Shift Right Logical Register
// 32-bit logical shift right by a register amount: emits "lsrvw".
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10633 
10634 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10649 
10650 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register amount: emits "asrvw".
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10665 
10666 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10681 
10682 // Combined Int Mask and Right Shift (using UBFM)
10683 // TODO
10684 
10685 // Long Shifts
10686 
10687 // Shift Left Register
// 64-bit shift left by a register amount: emits "lslv dst, src1, src2".
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10702 
10703 // Shift Left Immediate
// 64-bit shift left by an immediate; the count is masked to 0..63 as
// Java long-shift semantics require.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10718 
10719 // Shift Right Logical Register
// 64-bit logical shift right by a register amount: emits "lsrv".
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10734 
10735 // Shift Right Logical Immediate
// Logical (unsigned) right shift of a long by an immediate count;
// only the low six bits of the constant are used ($src2 & 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10750 
10751 // A special-case pattern for card table stores.
// Matches a pointer reinterpreted as a long (CastP2X) and logically
// right-shifted by a constant -- the address computation used by the
// card-table store pattern noted above.  Result is a long, not a pointer.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10766 
10767 // Shift Right Arithmetic Register
// Arithmetic right shift of a long by a variable count held in a register.
// ASRV uses the count register modulo 64, so no masking is required.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10782 
10783 // Shift Right Arithmetic Immediate
// Arithmetic right shift of a long by an immediate count;
// only the low six bits of the constant are used ($src2 & 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10798 
10799 // BEGIN This section of the file is automatically generated. Do not edit --------------
10800 
// NOTE(review): this lies inside the "automatically generated" section (see
// the BEGIN marker above); a lasting change belongs in the generator input,
// not here.  The comments added are annotations only -- code is unchanged.
//
// Bitwise NOT of a long.  The matcher sees ~src1 as (XorL src1 -1) and
// implements it as a single EON against the zero register.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Bitwise NOT of an int: (XorI src1 -1) implemented as EONW with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10833 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 & ~src2 (int): fused into a single BICW (bit clear).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// src1 & ~src2 (long): fused into a single BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// src1 | ~src2 (int): fused into a single ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// src1 | ~src2 (long): fused into a single ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// ~(src1 ^ src2) (int): matched as (XorI -1 (XorI src2 src1)), one EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// ~(src1 ^ src2) (long): matched as (XorL -1 (XorL src2 src1)), one EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10935 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 & ~(src2 >>> src3) (int): one BICW with an LSR-shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & ~(src2 >>> src3) (long): one BIC with an LSR-shifted operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & ~(src2 >> src3) (int): one BICW with an ASR-shifted operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & ~(src2 >> src3) (long): one BIC with an ASR-shifted operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & ~(src2 << src3) (int): one BICW with an LSL-shifted operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & ~(src2 << src3) (long): one BIC with an LSL-shifted operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11043 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// ~((src2 >>> src3) ^ src1) (int): one EONW with an LSR-shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ~((src2 >>> src3) ^ src1) (long): one EON with an LSR-shifted operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ~((src2 >> src3) ^ src1) (int): one EONW with an ASR-shifted operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ~((src2 >> src3) ^ src1) (long): one EON with an ASR-shifted operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ~((src2 << src3) ^ src1) (int): one EONW with an LSL-shifted operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ~((src2 << src3) ^ src1) (long): one EON with an LSL-shifted operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11151 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 | ~(src2 >>> src3) (int): one ORNW with an LSR-shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | ~(src2 >>> src3) (long): one ORN with an LSR-shifted operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | ~(src2 >> src3) (int): one ORNW with an ASR-shifted operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | ~(src2 >> src3) (long): one ORN with an ASR-shifted operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | ~(src2 << src3) (int): one ORNW with an LSL-shifted operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | ~(src2 << src3) (long): one ORN with an LSL-shifted operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11259 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 & (src2 >>> src3) (int): the shift is folded into a single ANDW
// using AArch64's shifted-register operand form.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & (src2 >>> src3) (long): one AND with LSR-shifted operand
// ("andr" is the assembler's name for the AND instruction).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & (src2 >> src3) (int): one ANDW with ASR-shifted operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & (src2 >> src3) (long): one AND with ASR-shifted operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & (src2 << src3) (int): one ANDW with LSL-shifted operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 & (src2 << src3) (long): one AND with LSL-shifted operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11373 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 ^ (src2 >>> src3) (int): one EORW with LSR-shifted operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 ^ (src2 >>> src3) (long): one EOR with LSR-shifted operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 ^ (src2 >> src3) (int): one EORW with ASR-shifted operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 ^ (src2 >> src3) (long): one EOR with ASR-shifted operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 ^ (src2 << src3) (int): one EORW with LSL-shifted operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 ^ (src2 << src3) (long): one EOR with LSL-shifted operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11487 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 | (src2 >>> src3) (int): one ORRW with LSR-shifted operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | (src2 >>> src3) (long): one ORR with LSR-shifted operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | (src2 >> src3) (int): one ORRW with ASR-shifted operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | (src2 >> src3) (long): one ORR with ASR-shifted operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | (src2 << src3) (int): one ORRW with LSL-shifted operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 | (src2 << src3) (long): one ORR with LSL-shifted operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11601 
// NOTE(review): machine-generated region (see BEGIN marker above);
// comments below are annotations only, code is unchanged.
//
// src1 + (src2 >>> src3) (int): one ADDW with LSR-shifted operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (src2 >>> src3) (long): one ADD with LSR-shifted operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (src2 >> src3) (int): one ADDW with ASR-shifted operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (src2 >> src3) (long): one ADD with ASR-shifted operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (src2 << src3) (int): one ADDW with LSL-shifted operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (src2 << src3) (long): one ADD with LSL-shifted operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11715 
// Subtract with a shifted-register second operand; same folding as the Add
// rules above: shift amounts are masked to the operand width (0x1f / 0x3f).

// dst = src1 - (src2 >>> src3), 32-bit.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3), 64-bit.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3), 32-bit (arithmetic shift).
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3), 64-bit (arithmetic shift).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 32-bit.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 64-bit.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11829 
11830 
11831 
11832 // Shift Left followed by Shift Right.
11833 // This idiom is used by the compiler for the i2b bytecode etc.
// dst = (src << lshift_count) >> rshift_count (64-bit, arithmetic right),
// reduced to a single signed bitfield move.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;            // SBFM imms
    int r = (rshift - lshift) & 63; // SBFM immr
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11851 
11852 // Shift Left followed by Shift Right.
11853 // This idiom is used by the compiler for the i2b bytecode etc.
// dst = (src << lshift_count) >> rshift_count (32-bit, arithmetic right),
// reduced to a single signed bitfield move (w form).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;            // SBFM imms
    int r = (rshift - lshift) & 31; // SBFM immr
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11871 
11872 // Shift Left followed by Shift Right.
11873 // This idiom is used by the compiler for the i2b bytecode etc.
// dst = (src << lshift_count) >>> rshift_count (64-bit, logical right),
// reduced to a single unsigned bitfield move.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;            // UBFM imms
    int r = (rshift - lshift) & 63; // UBFM immr
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11891 
11892 // Shift Left followed by Shift Right.
11893 // This idiom is used by the compiler for the i2b bytecode etc.
// dst = (src << lshift_count) >>> rshift_count (32-bit, logical right),
// reduced to a single unsigned bitfield move (w form).
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;            // UBFM imms
    int r = (rshift - lshift) & 31; // UBFM immr
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11911 // Bitfield extract with shift & mask
11912 
// dst = (src >>> rshift) & mask, 32-bit: an unsigned bitfield extract of
// exact_log2(mask + 1) bits starting at rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    int64_t mask = $mask$$constant;
    int width = exact_log2(mask+1); // field width in bits
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// dst = (src >>> rshift) & mask, 64-bit: an unsigned bitfield extract of
// exact_log2_long(mask + 1) bits starting at rshift.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    int64_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1); // field width in bits
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11947 
11948 // We can use ubfx when extending an And with a mask when we know mask
11949 // is positive.  We know that because immI_bitmask guarantees it.
// dst = (long)((src >>> rshift) & mask): the 64-bit ubfx zero-fills the
// upper bits, so the ConvI2L comes for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    int64_t mask = $mask$$constant;
    int width = exact_log2(mask+1); // field width in bits
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11967 
11968 // We can use ubfiz when masking by a positive number and then left shifting the result.
11969 // We know that the mask is positive because immI_bitmask guarantees it.
// dst = (src & mask) << lshift, 32-bit: an unsigned bitfield insert-in-zero
// of exact_log2(mask + 1) bits at position lshift.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    int64_t mask = $mask$$constant;
    int width = exact_log2(mask+1); // field width in bits
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11986 // We can use ubfiz when masking by a positive number and then left shifting the result.
11987 // We know that the mask is positive because immL_bitmask guarantees it.
// dst = (src & mask) << lshift, 64-bit: an unsigned bitfield insert-in-zero
// of exact_log2_long(mask + 1) bits at position lshift.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    int64_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1); // field width in bits
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12004 
12005 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// dst = (long)(src & mask) << lshift: 64-bit ubfiz with the zero extension
// (ConvI2L) folded into the same instruction.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    int64_t mask = $mask$$constant;
    int width = exact_log2(mask+1); // field width in bits
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12022 
12023 // Rotations
12024 
// 64-bit: (src1 << lshift) | (src2 >>> rshift) where the shift amounts sum
// to 0 mod 64 is EXTR — a field extracted from the concatenation src1:src2
// at bit position rshift (a rotation when src1 == src2).
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12039 
// 32-bit: (src1 << lshift) | (src2 >>> rshift) where the shift amounts sum
// to 0 mod 32 is EXTRW — a field extracted from the concatenation src1:src2
// at bit position rshift (a rotation when src1 == src2).
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Report the 32-bit mnemonic (extrw) actually emitted below, consistent
  // with the addw/subw formats used by the other 32-bit rules.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12054 
// 64-bit: same as extrOrL but matched through AddL — equivalent here since
// the predicate guarantees the two shifted fields do not overlap, so add
// and or produce the same bits.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12069 
// 32-bit: same as extrOrI but matched through AddI — equivalent here since
// the predicate guarantees the two shifted fields do not overlap, so add
// and or produce the same bits.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Report the 32-bit mnemonic (extrw) actually emitted below, consistent
  // with the addw/subw formats used by the other 32-bit rules.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12084 
12085 
12086 // rol expander
12087 
// 64-bit rotate-left by a variable count, implemented as a rotate-right by
// the negated count (subw computes -shift into rscratch1, rorv uses it).
// Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12101 
12102 // rol expander
12103 
// 32-bit rotate-left by a variable count, implemented as a rotate-right by
// the negated count.  Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12117 
// Matches the rotate-left idiom (src << shift) | (src >>> (64 - shift)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom after (64 - shift) was reduced to (0 - shift); equivalent
// because variable shift/rotate counts act modulo the operand width.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Matches the 32-bit rotate-left idiom (src << shift) | (src >>> (32 - shift)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit variant with (0 - shift) in place of (32 - shift).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12153 
12154 // ror expander
12155 
// 64-bit rotate-right by a variable count: maps directly onto rorv.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12168 
12169 // ror expander
12170 
// 32-bit rotate-right by a variable count: maps directly onto rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12183 
// Matches the rotate-right idiom (src >>> shift) | (src << (64 - shift)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom after (64 - shift) was reduced to (0 - shift); equivalent
// because variable shift/rotate counts act modulo the operand width.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Matches the 32-bit rotate-right idiom (src >>> shift) | (src << (32 - shift)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit variant with (0 - shift) in place of (32 - shift).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12219 
12220 // Add/subtract (extended)
12221 
// dst = src1 + (long)src2: the ConvI2L sign extension is folded into the
// 64-bit add as an sxtw operand extension.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// dst = src1 - (long)src2, with the sign extension folded in (sxtw).
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12247 
12248 
// An (x << C) >> C pair sign-extends (arithmetic >>) or zero-extends
// (logical >>>) a sub-word value; fold it into the add as an operand
// extension, with C selecting the sub-word width (sxtb/sxth/sxtw/uxtb).

// 32-bit, C == 16: sign-extend the low half-word of src2.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit, C == 24: sign-extend the low byte of src2.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit, C == 24 with >>>: zero-extend the low byte of src2.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit, C == 48: sign-extend the low half-word of src2.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit, C == 32: sign-extend the low word of src2.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit, C == 56: sign-extend the low byte of src2.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit, C == 56 with >>>: zero-extend the low byte of src2.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12339 
12340 
// AND with 0xff / 0xffff / 0xffffffff is a zero extension; fold the mask
// into the add/sub as a uxtb / uxth / uxtw operand extension.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12470 
12471 
// Sign extension followed by a constant left shift: the extended-register
// add/sub form also accepts a shift amount, so (ext(src2) << lshift2) still
// folds into a single instruction.  lshift2 is an immIExt operand (its
// definition elsewhere constrains the allowed shift amounts).
//
// Note: formats use "#$lshift2" so the debug listing prints the actual
// shift amount rather than the literal text "lshift2".

instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #$lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #$lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #$lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #$lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12562 
12563 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12564 %{
12565   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12566   ins_cost(1.9 * INSN_COST);
12567   format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
12568 
12569    ins_encode %{
12570      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12571             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12572    %}
12573   ins_pipe(ialu_reg_reg_shift);
12574 %}
12575 
12576 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12577 %{
12578   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12579   ins_cost(1.9 * INSN_COST);
12580   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
12581 
12582    ins_encode %{
12583      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12584             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12585    %}
12586   ins_pipe(ialu_reg_reg_shift);
12587 %}
12588 
12589 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12590 %{
12591   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12592   ins_cost(1.9 * INSN_COST);
12593   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
12594 
12595    ins_encode %{
12596      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12597             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12598    %}
12599   ins_pipe(ialu_reg_reg_shift);
12600 %}
12601 
12602 
// NOTE(review): still inside the machine-generated section.
// Folds (ConvI2L src) << lshift into add/sub with an sxtw-extended,
// shifted register operand.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
// NOTE(review): the trailing ';' after '%}' above and below is tolerated
// by adlc but is inconsistent with every other rule in this section.

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12628 
12629 
// NOTE(review): still inside the machine-generated section.
// Each rule folds the zero-extension idiom (And src mask) << lshift —
// where mask is 0xff / 0xffff / 0xffffffff — into a single add/sub with a
// zero-extended (uxtb/uxth/uxtw), shifted register operand.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit (int) variants, using addw/subw.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12759 // END This section of the file is automatically generated. Do not edit --------------
12760 
12761 // ============================================================================
12762 // Floating Point Arithmetic Instructions
12763 
// Scalar FP add/sub/mul, register-register forms.
// Single-precision rules dispatch to the _s pipeline class, double to _d.

// dst = src1 + src2, single precision.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 + src2, double precision.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 - src2, single precision.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 - src2, double precision.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 * src2, single precision (slightly costlier than add/sub).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 * src2, double precision.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12853 
// Fused multiply-add family. All rules are gated on UseFMA; the comment
// above each rule gives the value computed in terms of the operands.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand is not referenced by the match rule or
// the encoding — presumably a leftover; confirm before removing it.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): 'zero' is unused here as well — see mnsubF_reg_reg above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12994 
12995 
// Scalar FP min/max intrinsics (Math.min/Math.max); fmin/fmax implement
// the required NaN and -0.0/+0.0 ordering in hardware.

// Math.max(FF)F
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13051 
13052 
// dst = src1 / src2, single precision. Division is iterative, hence the
// much higher cost than add/mul.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// dst = src1 / src2, double precision (roughly twice the latency of the
// single-precision divide).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13082 
// dst = -src, single precision.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously printed "fneg" although the encoding emits
  // fnegs (the double variant, negD_reg_reg, already prints "fnegd").
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13096 
// dst = -src, double precision.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13110 
// Integer abs: compare against zero then conditionally negate.
// Kills the flags, hence effect(KILL cr).
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long abs, same cmp + conditional-negate pattern as absI_reg.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// FP abs is a single instruction (clears the sign bit); no flags touched.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13170 
// dst = sqrt(src), double precision.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: this double-precision op was scheduled on the single-precision
  // divide pipe (fp_div_s) while sqrtF_reg used fp_div_d — the two pipe
  // classes were swapped (cf. divF/divD, which use _s/_d respectively).
  ins_pipe(fp_div_d);
%}
13183 
// dst = sqrt(src), single precision.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: this single-precision op was scheduled on the double-precision
  // divide pipe (fp_div_d) — swapped with sqrtD_reg (cf. divF/divD).
  ins_pipe(fp_div_s);
%}
13196 
// Math.rint, floor, ceil
// Selects the frint rounding variant from the compile-time rmode constant:
// rint -> frintn (ties-to-even), floor -> frintm (toward -inf),
// ceil -> frintp (toward +inf).
// NOTE(review): the switch has no default case, so an unexpected rmode
// constant would silently emit nothing — consider ShouldNotReachHere().
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
13219 
13220 // ============================================================================
13221 // Logical Instructions
13222 
13223 // Integer Logical Instructions
13224 
13225 // And Instructions
13226 
13227 
// dst = src1 & src2 (32-bit).
// NOTE(review): the cr operand appears unused — andw does not set flags
// and there is no effect(KILL cr); confirm whether it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13242 
// dst = src1 & imm (32-bit, logical-immediate encodable constants).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format previously said "andsw" (the flag-setting form) although
  // the encoding emits the non-flag-setting andw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13257 
// Or Instructions

// dst = src1 | src2 (32-bit).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | imm (logical-immediate encodable constants only).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2 (32-bit).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ imm (logical-immediate encodable constants only).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13321 
// Long Logical Instructions
// TODO

// NOTE(review): the format comments in this group previously said "# int";
// corrected to "# long" to match the 64-bit operations they describe.

// dst = src1 & src2 (64-bit).
// NOTE(review): the cr operands below appear unused (no flags set, no
// effect(KILL cr)) — confirm whether they can be dropped.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & imm (64-bit, logical-immediate encodable constants).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// dst = src1 | src2 (64-bit).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | imm (64-bit, logical-immediate encodable constants).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2 (64-bit).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ imm (64-bit, logical-immediate encodable constants).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13418 
// int -> long: sign-extend the low 32 bits (sbfm with imms=31 is sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// unsigned int -> long: (long)src & 0xffffffff zero-extends (ubfm = uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int: a 32-bit register move truncates to the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// int -> boolean: dst = (src != 0) ? 1 : 0 via compare + cset.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0 (64-bit compare).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// double -> float narrowing convert.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening convert.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int: convert toward zero (fcvtzs, 32-bit form).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: convert toward zero (fcvtzs, 64-bit form).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// signed int -> float.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// signed long -> float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
13571 
13572 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
13573   match(Set dst (ConvD2I src));
13574 
13575   ins_cost(INSN_COST * 5);
13576   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
13577 
13578   ins_encode %{
13579     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13580   %}
13581 
13582   ins_pipe(fp_d2i);
13583 %}
13584 
13585 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
13586   match(Set dst (ConvD2L src));
13587 
13588   ins_cost(INSN_COST * 5);
13589   format %{ "fcvtzd  $dst, $src \t// d2l" %}
13590 
13591   ins_encode %{
13592     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13593   %}
13594 
13595   ins_pipe(fp_d2l);
13596 %}
13597 
13598 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
13599   match(Set dst (ConvI2D src));
13600 
13601   ins_cost(INSN_COST * 5);
13602   format %{ "scvtfwd  $dst, $src \t// i2d" %}
13603 
13604   ins_encode %{
13605     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13606   %}
13607 
13608   ins_pipe(fp_i2d);
13609 %}
13610 
13611 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
13612   match(Set dst (ConvL2D src));
13613 
13614   ins_cost(INSN_COST * 5);
13615   format %{ "scvtfd  $dst, $src \t// l2d" %}
13616 
13617   ins_encode %{
13618     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13619   %}
13620 
13621   ins_pipe(fp_l2d);
13622 %}
13623 
// stack <-> reg and reg <-> reg shuffles with no conversion
//
// MoveF2I/MoveI2F/MoveD2L/MoveL2D reinterpret the raw bit pattern between
// the integer and FP register files (or a spilled stack slot) without any
// numeric conversion.

// Load a spilled float's 32 bits into an integer register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a spilled int's 32 bits into an FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load a spilled double's 64 bits into an integer register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a spilled long's 64 bits into an FP register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a float's 32 bits to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int's 32 bits to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13733 
// Store a double's 64 bits to a long stack slot (bit-pattern move, no
// conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: the debug format previously printed "strd $dst, $src", i.e.
  // operands reversed relative to the emitted store (src is stored to
  // dst) and inconsistent with the sibling MoveF2I_reg_stack /
  // MoveI2F_reg_stack / MoveL2D_reg_stack rules.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13751 
// Store a long's 64 bits to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Register-to-register bit move: float bits -> int register (fmov).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Register-to-register bit move: int bits -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Register-to-register bit move: double bits -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Register-to-register bit move: long bits -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13841 
13842 // ============================================================================
13843 // clearing of an array
13844 
13845 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13846 %{
13847   match(Set dummy (ClearArray cnt base));
13848   effect(USE_KILL cnt, USE_KILL base);
13849 
13850   ins_cost(4 * INSN_COST);
13851   format %{ "ClearArray $cnt, $base" %}
13852 
13853   ins_encode %{
13854     __ zero_words($base$$Register, $cnt$$Register);
13855   %}
13856 
13857   ins_pipe(pipe_class_memory);
13858 %}
13859 
13860 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13861 %{
13862   predicate((uint64_t)n->in(2)->get_long()
13863             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13864   match(Set dummy (ClearArray cnt base));
13865   effect(USE_KILL base);
13866 
13867   ins_cost(4 * INSN_COST);
13868   format %{ "ClearArray $cnt, $base" %}
13869 
13870   ins_encode %{
13871     __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
13872   %}
13873 
13874   ins_pipe(pipe_class_memory);
13875 %}
13876 
13877 // ============================================================================
13878 // Overflow Math Instructions
13879 
13880 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13881 %{
13882   match(Set cr (OverflowAddI op1 op2));
13883 
13884   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13885   ins_cost(INSN_COST);
13886   ins_encode %{
13887     __ cmnw($op1$$Register, $op2$$Register);
13888   %}
13889 
13890   ins_pipe(icmp_reg_reg);
13891 %}
13892 
13893 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13894 %{
13895   match(Set cr (OverflowAddI op1 op2));
13896 
13897   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13898   ins_cost(INSN_COST);
13899   ins_encode %{
13900     __ cmnw($op1$$Register, $op2$$constant);
13901   %}
13902 
13903   ins_pipe(icmp_reg_imm);
13904 %}
13905 
13906 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13907 %{
13908   match(Set cr (OverflowAddL op1 op2));
13909 
13910   format %{ "cmn   $op1, $op2\t# overflow check int64_t" %}
13911   ins_cost(INSN_COST);
13912   ins_encode %{
13913     __ cmn($op1$$Register, $op2$$Register);
13914   %}
13915 
13916   ins_pipe(icmp_reg_reg);
13917 %}
13918 
13919 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13920 %{
13921   match(Set cr (OverflowAddL op1 op2));
13922 
13923   format %{ "cmn   $op1, $op2\t# overflow check int64_t" %}
13924   ins_cost(INSN_COST);
13925   ins_encode %{
13926     __ cmn($op1$$Register, $op2$$constant);
13927   %}
13928 
13929   ins_pipe(icmp_reg_imm);
13930 %}
13931 
// Subtraction overflow checks: cmp performs the subtract and discards the
// result, so V is set exactly when op1 - op2 overflows.

// int sub overflow check, register operand.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// int sub overflow check, add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long sub overflow check, register operand.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check int64_t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long sub overflow check, add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check int64_t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // subs with zr destination is the underlying encoding of cmp; flags
    // are identical to the register-register variant above.
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13983 
// Negation overflow checks: negation is matched as (OverflowSub 0 op1),
// so compare zr against op1 — V is set when 0 - op1 overflows (op1 ==
// MIN_VALUE).

// int negate overflow check.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long negate overflow check.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check int64_t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14009 
// int multiply overflow check.  smull computes the exact 64-bit product;
// the product overflows int iff it differs from the sign-extension of its
// own low 32 bits.  Since the consumer tests the V flag, the final two
// instructions translate that NE/EQ outcome into V set/clear by computing
// 0x80000000 - 1 (which sets V) only in the overflow case.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14030 
// Fused multiply-overflow-and-branch: when the If consumes the overflow
// test directly, skip manufacturing the V flag and branch on the NE/EQ
// result of the sign-extension compare instead (NE == overflow).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  // Only the overflow / no_overflow forms of the Bool can be rewritten
  // into an NE / EQ branch.
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE, VC maps to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14052 
// long multiply overflow check.  mul/smulh produce the full 128-bit
// product; it fits in 64 bits iff the high half equals the sign bit of
// the low half replicated (ASR #63).  As in overflowMulI_reg, the tail
// converts that NE/EQ outcome into the V flag for the consumer.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check int64_t\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14075 
// Fused long-multiply-overflow-and-branch: branch directly on the NE/EQ
// result of the high-half check instead of materializing the V flag.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  // Only overflow / no_overflow Bool tests can be rewritten to NE / EQ.
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check int64_t\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // VS (overflow requested) maps to NE, VC maps to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14099 
14100 // ============================================================================
14101 // Compare Instructions
14102 
14103 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14104 %{
14105   match(Set cr (CmpI op1 op2));
14106 
14107   effect(DEF cr, USE op1, USE op2);
14108 
14109   ins_cost(INSN_COST);
14110   format %{ "cmpw  $op1, $op2" %}
14111 
14112   ins_encode(aarch64_enc_cmpw(op1, op2));
14113 
14114   ins_pipe(icmp_reg_reg);
14115 %}
14116 
14117 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14118 %{
14119   match(Set cr (CmpI op1 zero));
14120 
14121   effect(DEF cr, USE op1);
14122 
14123   ins_cost(INSN_COST);
14124   format %{ "cmpw $op1, 0" %}
14125 
14126   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14127 
14128   ins_pipe(icmp_reg_imm);
14129 %}
14130 
14131 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14132 %{
14133   match(Set cr (CmpI op1 op2));
14134 
14135   effect(DEF cr, USE op1);
14136 
14137   ins_cost(INSN_COST);
14138   format %{ "cmpw  $op1, $op2" %}
14139 
14140   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14141 
14142   ins_pipe(icmp_reg_imm);
14143 %}
14144 
14145 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14146 %{
14147   match(Set cr (CmpI op1 op2));
14148 
14149   effect(DEF cr, USE op1);
14150 
14151   ins_cost(INSN_COST * 2);
14152   format %{ "cmpw  $op1, $op2" %}
14153 
14154   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14155 
14156   ins_pipe(icmp_reg_imm);
14157 %}
14158 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.  The emitted cmpw is identical; only the flags-register class
// (rFlagsRegU) differs so consumers pick unsigned condition codes.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (two insns).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14218 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the format prints "tst" but the addsub-immediate encoder
// emits a compare with #0 (subs zr, op1, #0) — same flag outcome for the
// conditions used; confirm the mnemonic is intentional.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate (one insn).
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (two insns).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14274 
// Unsigned long compares: same encodings as the signed CmpL rules, but
// the rFlagsRegU class steers consumers to unsigned condition codes.

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero (see compL_reg_immL0 re: "tst").
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate (two insns).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14330 
// Pointer and compressed-pointer compares.  Pointers compare unsigned,
// hence rFlagsRegU.

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed pointer compare (32-bit narrow oops).
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null check: compare against the null constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed pointer null check.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14386 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against 0.0 — fcmps has a dedicated zero form that needs
// no second register.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14448 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, 1 if greater.  Clobbers flags.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously read "csinvw($dst, zr, zr, eq" with
  // unbalanced parentheses; now plain assembly syntax matching the
  // emitted code.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (An unused 'Label done; ... bind(done);' pair that emitted nothing
    // has been removed.)
  %}

  ins_pipe(pipe_class_default);

%}
14476 
// Three-way double compare (CmpD3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, 1 if greater.  Clobbers flags.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses
  // ("csinvw($dst, zr, zr, eq"); now plain assembly syntax.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Unused 'Label done; ... bind(done);' removed — it emitted nothing.)
  %}
  ins_pipe(pipe_class_default);

%}
14503 
// Three-way float compare against 0.0, using the zero-immediate form of
// fcmps.  dst = -1 / 0 / 1 as in compF3_reg_reg.  Clobbers flags.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses
  // ("csinvw($dst, zr, zr, eq"); now plain assembly syntax.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Unused 'Label done; ... bind(done);' removed — it emitted nothing.)
  %}

  ins_pipe(pipe_class_default);

%}
14530 
// Three-way double compare against 0.0, using the zero-immediate form of
// fcmpd.  dst = -1 / 0 / 1 as in compD3_reg_reg.  Clobbers flags.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses
  // ("csinvw($dst, zr, zr, eq"); now plain assembly syntax.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Unused 'Label done; ... bind(done);' removed — it emitted nothing.)
  %}
  ins_pipe(pipe_class_default);

%}
14556 
// CmpLTMask: dst = (p < q) ? -1 : 0.  Compare, set dst to 1 on LT, then
// negate to spread 1 into an all-ones mask.  Clobbers flags.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: an arithmetic right shift by 31 replicates
// the sign bit, yielding -1 for negative src and 0 otherwise in a single
// instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14593 
14594 // ============================================================================
14595 // Max and Min
14596 
14597 instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14598 %{
14599   effect( DEF dst, USE src1, USE src2, USE cr );
14600 
14601   ins_cost(INSN_COST * 2);
14602   format %{ "cselw $dst, $src1, $src2 lt\t"  %}
14603 
14604   ins_encode %{
14605     __ cselw(as_Register($dst$$reg),
14606              as_Register($src1$$reg),
14607              as_Register($src2$$reg),
14608              Assembler::LT);
14609   %}
14610 
14611   ins_pipe(icond_reg_reg);
14612 %}
14613 
14614 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
14615 %{
14616   match(Set dst (MinI src1 src2));
14617   ins_cost(INSN_COST * 3);
14618 
14619   expand %{
14620     rFlagsReg cr;
14621     compI_reg_reg(cr, src1, src2);
14622     cmovI_reg_reg_lt(dst, src1, src2, cr);
14623   %}
14624 
14625 %}
14626 // FROM HERE
14627 
14628 instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14629 %{
14630   effect( DEF dst, USE src1, USE src2, USE cr );
14631 
14632   ins_cost(INSN_COST * 2);
14633   format %{ "cselw $dst, $src1, $src2 gt\t"  %}
14634 
14635   ins_encode %{
14636     __ cselw(as_Register($dst$$reg),
14637              as_Register($src1$$reg),
14638              as_Register($src2$$reg),
14639              Assembler::GT);
14640   %}
14641 
14642   ins_pipe(icond_reg_reg);
14643 %}
14644 
14645 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
14646 %{
14647   match(Set dst (MaxI src1 src2));
14648   ins_cost(INSN_COST * 3);
14649   expand %{
14650     rFlagsReg cr;
14651     compI_reg_reg(cr, src1, src2);
14652     cmovI_reg_reg_gt(dst, src1, src2, cr);
14653   %}
14654 %}
14655 
14656 // ============================================================================
14657 // Branch Instructions
14658 
14659 // Direct Branch.
// Unconditional direct branch (Goto) to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch: branch on the signed condition in cr.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned: same shape as branchCon but uses
// the unsigned condition-code operand/flags register.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14715 
14716 // Make use of CBZ and CBNZ.  These instructions, as well as being
14717 // shorter than (cmp; branch), have the additional benefit of not
14718 // killing the flags.
14719 
// Compare a 32-bit register with zero and branch: one cbzw/cbnzw
// instead of cmp+branch, and the flags register is not clobbered.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);   // branch if $op1 == 0
    else
      __ cbnzw($op1$$Register, *L);  // branch if $op1 != 0
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit variant: cbz/cbnz on the full register.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer-vs-null test: cbz/cbnz on the 64-bit pointer register.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop-vs-null test: compressed oops are 32 bits, so use
// cbzw/cbnzw.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null test of a decoded narrow oop: DecodeN of zero yields the null
// pointer, so the compressed 32-bit value can be tested directly and
// the decode is elided.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned 32-bit compare against zero.  Unsigned "<= 0" is exactly
// "== 0" and "> 0" is "!= 0", so EQ/LS map to cbzw and the rest to
// cbnzw.
// NOTE(review): the lt/ge encodings produced by cmpOpUEqNeLtGe are
// presumably unsigned condition codes -- confirm against the operand
// definition.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned 64-bit variant of the above: cbz/cbnz on the full register.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14838 
14839 // Test bit and Branch
14840 
14841 // Patterns for short (< 32KiB) variants
// Sign test of a 64-bit value: "x < 0" is exactly "bit 63 set", so
// LT becomes tbnz (NE) and GE becomes tbz (EQ) on the sign bit.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int64_t" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);  // short variant: tbz/tbnz reach is limited
%}

// 32-bit sign test: same trick on bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (x & mask) ==/!= 0 where mask is a single bit: one tbz/tbnz on that
// bit, skipping the AND entirely.  64-bit form.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// 32-bit form of the single-bit test-and-branch.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14907 
14908 // And far variants
// Far variants of the test-bit branches above: not marked as short
// branches, and tbr is passed far=true so it can reach targets beyond
// the 32KiB tbz/tbnz range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int64_t" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far 32-bit sign test (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far 64-bit single-bit test: mask must be a power of two.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far 32-bit single-bit test.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14970 
14971 // Test bits
14972 
// Set flags from (op1 & mask) where the 64-bit mask is encodable as a
// logical immediate: a single tst, no materialized AND result.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int64_t" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14985 
// Set flags from (op1 & mask) where the 32-bit mask is encodable as a
// logical immediate: a single tstw, no materialized AND result.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // The encoding emits the 32-bit tstw, so print "tstw" (matches
  // cmpI_and_reg below); previously the format misleadingly said "tst".
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14998 
// Register-register forms of the test-bits patterns: no immediate
// encodability predicate is needed.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int64_t" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit register-register test.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15020 
15021 
15022 // Conditional Far Branch
15023 // Conditional Far Branch Unsigned
15024 // TODO: fixme
15025 
15026 // counted loop end branch near
// Back-branch of a counted loop, signed condition.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Back-branch of a counted loop, unsigned condition.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15059 
15060 // counted loop end branch far
15061 // counted loop end branch far unsigned
15062 // TODO: fixme
15063 
15064 // ============================================================================
15065 // inlined locking and unlocking
15066 
// Inlined monitor enter.  Sets the flags register so the following
// branch can test for the slow path; tmp/tmp2 are scratch.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit, mirror of cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15094 
15095 
15096 // ============================================================================
15097 // Safepoint Instructions
15098 
15099 // TODO
15100 // provide a near and far version of this code
15101 
// Safepoint poll: load from the polling page held in $poll.  The load
// faults when the VM arms the page, diverting the thread into the
// safepoint handler; the poll_type relocation marks the site for the
// signal handler.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15115 
15116 
15117 // ============================================================================
15118 // Procedure Call/Return Instructions
15119 
15120 // Call Java Static Instruction
15121 
// Direct (statically-bound) Java call; the epilog encoding follows the
// call itself.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15137 
15138 // TO HERE
15139 
15140 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (through an inline cache).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction (full runtime transition)

instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf Instruction

instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf Instruction, no floating-point state saved

instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15207 
15208 // Tail Call; Jump from runtime stub to Java code.
15209 // Also known as an 'interprocedural jump'.
15210 // Target of jump will eventually return to caller.
15211 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the method
// oop rides along in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Jump to an exception handler; the exception oop is pinned in r0.
// Unlike TailCall, this removes the return address (see the comment
// preceding TailCalljmpInd).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15237 
15238 // Create exception oop: created by stack-crawling runtime code.
15239 // Created exception is now available to this handler, and is setup
15240 // just prior to jumping to this handler. No code emitted.
15241 // TODO check
15242 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Marker node for "exception oop is already in r0"; emits no code.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow: jump (not call) to the shared rethrow stub; the exception
// oop arrives in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15268 
15269 
15270 // Return Instruction
15271 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr from the popped
// frame, so a bare ret suffices.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Halt node: abort the VM with the recorded reason.  Nothing is
// emitted when the node is provably unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
15298 
15299 // ============================================================================
15300 // Partial Subtype Check
15301 //
15302 // superklass array for an instance of the superklass.  Set a hidden
15303 // internal cache on a hit (cache is checked with exposed code in
15304 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15305 // encoding ALSO sets flags.
15306 
// Slow-path subtype check in fixed registers (see the section comment
// above): result is zero on a hit, non-zero on a miss; flags are set
// as a side effect of the encoding.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when only the flags of the check are consumed
// (result compared against null): the result register need not be
// zeroed on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15336 
// String.compareTo intrinsic, both strings UTF-16 (UU encoding).
// Inputs/result are pinned to the registers the stub expects.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps (the effect clause kills tmp1 AND tmp2),
  // consistent with the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15354 
// String.compareTo intrinsic, both strings Latin-1 (LL encoding).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps (the effect clause kills tmp1 AND tmp2),
  // consistent with the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15371 
// Mixed-encoding compare (str1 UTF-16, str2 Latin-1): the stub also
// needs three vector scratch registers.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding compare, mirror case (str1 Latin-1, str2 UTF-16).
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15411 
// String.indexOf intrinsic with a runtime substring length; the -1
// passed as int_cnt2 tells the stub the length is not a compile-time
// constant.  Both strings UTF-16.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above, both strings Latin-1.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above, mixed encodings (source UTF-16, substring Latin-1).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15474 
15475 instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
15476                  immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
15477                  iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
15478 %{
15479   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
15480   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
15481   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
15482          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
15483   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}
15484 
15485   ins_encode %{
15486     int icnt2 = (int)$int_cnt2$$constant;
15487     __ string_indexof($str1$$Register, $str2$$Register,
15488                       $cnt1$$Register, zr,
15489                       $tmp1$$Register, $tmp2$$Register,
15490                       $tmp3$$Register, $tmp4$$Register, zr, zr,
15491                       icnt2, $result$$Register, StrIntrinsicNode::UU);
15492   %}
15493   ins_pipe(pipe_class_memory);
15494 %}
15495 
// As string_indexof_conUU, but both strings Latin-1 (StrIntrinsicNode::LL):
// constant needle length <= 4, passed as the immediate icnt2.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    // Needle length is known at compile time.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15516 
// StrIndexOf for a UTF-16 haystack / Latin-1 needle (StrIntrinsicNode::UL).
// Note the tighter operand class: immI_1 restricts this rule to a needle of
// exactly one element (vs immI_le_4 for the UU/LL variants above).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    // Needle length is known at compile time (always 1 here).
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15537 
// StrIndexOfChar: find the first occurrence of char $ch in the UTF-16
// string $str1 of length $cnt1; index (or miss marker, see
// MacroAssembler::string_indexof_char) is returned in $result.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15555 
// StrEquals for two Latin-1 strings (StrIntrinsicNode::LL).
// The trailing '1' passed to string_equals() selects 1-byte elements.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15571 
// StrEquals for two UTF-16 strings (StrIntrinsicNode::UU).
// The trailing '2' passed to string_equals() selects 2-byte elements.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15587 
// AryEq for byte arrays (StrIntrinsicNode::LL, 1-byte elements).
// Fixes vs previous version: the format string wrote literal "ary2" instead
// of the operand substitution "$ary2" (so debug output printed wrong text,
// inconsistent with every sibling rule), and the ins_encode closing "%}"
// was mis-indented relative to array_equalsC.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Trailing '1' selects 1-byte element size.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15604 
// AryEq for char arrays (StrIntrinsicNode::UU, 2-byte elements).
// Fix vs previous version: the format string wrote literal "ary2" instead
// of the operand substitution "$ary2", so debug output printed wrong text
// (inconsistent with the string_* rules above).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Trailing '2' selects 2-byte element size.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15621 
// HasNegatives: scan the byte array $ary1 of length $len; the result is
// computed by MacroAssembler::has_negatives (defined elsewhere — presumably
// non-zero when any byte has its sign bit set).
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15632 
15633 // fast char[] to byte[] compression
// StrCompressedCopy: compress a char[] into a byte[] using SIMD temporaries
// V0-V3; success/length indication is left in $result by
// MacroAssembler::char_array_compress.
// NOTE(review): the format comment lists "KILL R1, R2, R3, R4" but the
// fixed GP registers used here are R1, R2, R3 (plus R0 for the result) —
// confirm whether R4 is really clobbered by the stub.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15651 
15652 // fast byte[] to char[] inflation
// StrInflatedCopy: inflate a byte[] into a char[].  Produces no value
// (Universe dummy result); SIMD temporaries V0-V2 plus GP temp R3 are used.
// NOTE(review): the format comment only mentions $tmp1/$tmp2 but the effect
// list also declares TEMP tmp3 and tmp4 — comment appears stale.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15666 
15667 // encode char[] to byte[] in ISO_8859_1
// EncodeISOArray: encode char[] $src into ISO-8859-1 byte[] $dst.  Unlike
// the TEMP vector operands of string_compress, the vector temporaries here
// are declared KILL (clobbered, not allocatable as temps by the allocator).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15685 
15686 // ============================================================================
15687 // This name is KNOWN by the ADLC and cannot be changed.
15688 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15689 // for this guy.
// ThreadLocal: yields the current thread pointer.  The thread_RegP operand
// class pins $dst to the dedicated thread register, so no instruction needs
// to be emitted — hence size(0), cost 0, and the empty encoding.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15704 
15705 // ====================VECTOR INSTRUCTIONS=====================================
15706 
15707 // Load vector (32 bits)
// 32-bit vector load (ldrs); the predicate disambiguates the shared
// LoadVector ideal op from the 64/128-bit rules below via memory_size().
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15717 
15718 // Load vector (64 bits)
// 64-bit vector load (ldrd), selected when memory_size() == 8.
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15728 
15729 // Load Vector (128 bits)
// 128-bit vector load (ldrq) into a Q register, selected when
// memory_size() == 16.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15739 
15740 // Store Vector (32 bits)
// 32-bit vector store (strs), mirror of loadV4.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15750 
15751 // Store Vector (64 bits)
// 64-bit vector store (strd), mirror of loadV8.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15761 
15762 // Store Vector (128 bits)
// 128-bit vector store (strq), mirror of loadV16.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15772 
// ReplicateB into a D register: dup broadcasts the low byte of the GP
// source into all 8 byte lanes.  Also used for 4-byte vectors (the
// predicate accepts length 4 or 8; extra lanes are simply unused).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15785 
// ReplicateB into a Q register: broadcast one byte into all 16 lanes.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15797 
// ReplicateB of an immediate: movi materializes the constant directly in
// the vector register.  The constant is masked to its low 8 bits.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15810 
// 16-lane variant of replicate8B_imm (constant masked to 8 bits).
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15822 
// ReplicateS into a D register: broadcast a 16-bit value into 4 half-word
// lanes (T4H); also covers 2-element short vectors.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15835 
// ReplicateS into a Q register: broadcast into 8 half-word lanes (T8H).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15847 
// ReplicateS of an immediate (masked to 16 bits) into 4 half-word lanes.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15860 
// 8-lane variant of replicate4S_imm (constant masked to 16 bits).
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15872 
// ReplicateI into a D register: broadcast a 32-bit value into 2 lanes (T2S).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15884 
// ReplicateI into a Q register: broadcast into 4 word lanes (T4S).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15896 
// ReplicateI of an immediate into 2 word lanes.  No masking here, unlike
// the byte/short variants — the constant is already a 32-bit value.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15908 
// 4-lane variant of replicate2I_imm.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15920 
// ReplicateL: broadcast a 64-bit GP register into both D lanes (T2D).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15932 
// Replicate zero into a 128-bit register.  Matches ReplicateI of the int
// immediate 0 (zeroing all 128 bits covers the 2L case too — presumably
// why this rule matches ReplicateI rather than ReplicateL; confirm against
// the ideal-graph shape for long-zero replication).
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // NOTE(review): format shows "movi" but the encoding actually emits an
  // eor of $dst with itself — the idiomatic zeroing of a vector register.
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15946 
// ReplicateF into a D register: dup from an FP source register (element
// form), broadcasting one float into both S lanes.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15959 
// ReplicateF into a Q register: broadcast one float into all 4 S lanes.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15972 
// ReplicateD: broadcast one double into both D lanes of a Q register.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15985 
15986 // ====================REDUCTION ARITHMETIC====================================
15987 
// AddReductionVI on a 2-lane int vector: extract both lanes to GP
// registers with umov, then dst = isrc + lane0 + lane1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "umov  $tmp2, $vsrc, S, 1\n\t"
            "addw  $tmp, $isrc, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ addw($tmp$$Register, $isrc$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16006 
// AddReductionVI on a 4-lane int vector: addv sums all four lanes in one
// SIMD instruction, then the lane sum is added to the scalar input isrc.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp);
  format %{ "addv  $vtmp, T4S, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "addw  $dst, $itmp, $isrc\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($vtmp$$reg), __ T4S,
            as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ addw($dst$$Register, $itmp$$Register, $isrc$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16024 
// MulReductionVI on a 2-lane int vector: dst = lane1 * (lane0 * isrc).
// dst is declared TEMP because it is written before all inputs are read.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "mul   $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16043 
// MulReductionVI on a 4-lane int vector: first fold the upper half onto
// the lower half (ins copies D lane 1 to lane 0, then a 2S mulv multiplies
// pairwise), then finish with two scalar multiplies as in reduce_mul2I.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp, TEMP dst);
  format %{ "ins   $vtmp, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T2S, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "mul   $dst, $itmp, $isrc\n\t"
            "umov  $itmp, $vtmp, S, 1\n\t"
            "mul   $dst, $itmp, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T2S,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ mul($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 1);
    __ mul($dst$$Register, $itmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16068 
// AddReductionVF on 2 floats, accumulated strictly in order
// ((fsrc + lane0) + lane1) — lane 1 is moved to element 0 of $tmp with ins
// so a scalar fadds can reach it.
instruct reduce_add2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16088 
// AddReductionVF on 4 floats, accumulated strictly lane by lane
// (((fsrc + lane0) + lane1) + lane2) + lane3; each upper lane is moved to
// element 0 of $tmp so the scalar fadds can consume it.
instruct reduce_add4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16120 
// MulReductionVF on 2 floats, strictly ordered: (fsrc * lane0) * lane1.
instruct reduce_mul2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16140 
// MulReductionVF on 4 floats, strictly ordered lane by lane:
// (((fsrc * lane0) * lane1) * lane2) * lane3.
instruct reduce_mul4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16172 
// AddReductionVD on 2 doubles, strictly ordered: (dsrc + lane0) + lane1.
instruct reduce_add2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16192 
// MulReductionVD on 2 doubles, strictly ordered: (dsrc * lane0) * lane1.
instruct reduce_mul2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16212 
// MaxReductionV on 2 floats: max(max(fsrc, lane0), lane1) via scalar fmaxs.
// The predicate selects on element type since MaxReductionV is generic.
instruct reduce_max2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16228 
// MaxReductionV on 4 floats: fmaxv reduces all four lanes in one SIMD
// instruction, then the scalar input fsrc is folded in with fmaxs.
instruct reduce_max4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $vsrc\n\t"
            "fmaxs $dst, $dst, $fsrc\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16242 
// MaxReductionV on 2 doubles: max(max(dsrc, lane0), lane1) via scalar fmaxd
// (there is no D-lane fmaxv form used here).
instruct reduce_max2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16258 
// MinReductionV on 2 floats: min(min(fsrc, lane0), lane1) via scalar fmins.
instruct reduce_min2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16274 
// MinReductionV on 4 floats: fminv reduces all four lanes, then the scalar
// input fsrc is folded in with fmins (mirror of reduce_max4F).
instruct reduce_min4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $vsrc\n\t"
            "fmins $dst, $dst, $fsrc\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16288 
// MinReductionV on 2 doubles: min(min(dsrc, lane0), lane1) via scalar fmind.
instruct reduce_min2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16304 
16305 // ====================VECTOR ARITHMETIC=======================================
16306 
16307 // --------------------------------- ADD --------------------------------------
16308 
// Integer/FP vector add. Each instruct pairs one vector length with one
// NEON arrangement; sub-register-length vectors (e.g. 4 bytes) reuse the
// 64-bit D-register form with the unused upper lanes ignored.

// AddVB, 4 or 8 bytes in a D register (T8B arrangement).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// AddVB, 16 bytes in a Q register (T16B).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// AddVS, 2 or 4 shorts in a D register (T4H).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// AddVS, 8 shorts in a Q register (T8H).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// AddVI, 2 ints in a D register (T2S).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// AddVI, 4 ints in a Q register (T4S).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// AddVL, 2 longs in a Q register (T2D).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// AddVF, 2 floats in a D register (fadd, T2S).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// AddVF, 4 floats in a Q register (fadd, T4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16436 
// AddVD, 2 doubles in a Q register (fadd, T2D).
// Fix: added the length()==2 predicate for consistency with vsub2D,
// vmul2D and vdiv2D — every other 2D vector instruct in this file guards
// on the vector length before matching.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16449 
16450 // --------------------------------- SUB --------------------------------------
16451 
// Integer/FP vector subtract; mirrors the ADD family above, one instruct
// per (vector length, arrangement) pair.

// SubVB, 4 or 8 bytes (T8B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// SubVB, 16 bytes (T16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// SubVS, 2 or 4 shorts (T4H).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// SubVS, 8 shorts (T8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// SubVI, 2 ints (T2S).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// SubVI, 4 ints (T4S).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// SubVL, 2 longs (T2D).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// SubVF, 2 floats (fsub, T2S).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// SubVF, 4 floats (fsub, T4S).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// SubVD, 2 doubles (fsub, T2D).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16593 
16594 // --------------------------------- MUL --------------------------------------
16595 
// Vector multiply. Integer variants use mulv (vmul pipes), FP variants use
// fmul (vmuldiv_fp pipes). There is no 2L variant — NEON has no 64-bit
// integer lane multiply.

// MulVB, 4 or 8 bytes (T8B).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// MulVB, 16 bytes (T16B).
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// MulVS, 2 or 4 shorts (T4H).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// MulVS, 8 shorts (T8H).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// MulVI, 2 ints (T2S).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// MulVI, 4 ints (T4S).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// MulVF, 2 floats (fmul, T2S).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// MulVF, 4 floats (fmul, T4S).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// MulVD, 2 doubles (fmul, T2D).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16723 
16724 // --------------------------------- MLA --------------------------------------
16725 
// Multiply-accumulate. Integer variants pattern-match Add(dst, Mul(...))
// directly; FP variants match the explicit FmaV* node and are gated on
// UseFMA because fmla fuses the multiply and add into a single rounding,
// which is only a legal transform when FMA semantics are requested.

// mla for 2 or 4 shorts (T4H): dst += src1 * src2.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// mla for 8 shorts (T8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// mla for 2 ints (T2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// mla for 4 ints (T4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2 (fused, 2 floats, T2S).
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2 (fused, 4 floats, T4S).
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2 (fused, 2 doubles, T2D).
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16824 
16825 // --------------------------------- MLS --------------------------------------
16826 
// Multiply-subtract. Integer variants match Sub(dst, Mul(...)). FP
// variants match FmaV* with either multiplicand negated (both orderings
// listed), which is exactly what fmls computes: dst - src1 * src2.

// mls for 2 or 4 shorts (T4H): dst -= src1 * src2.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// mls for 8 shorts (T8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// mls for 2 ints (T2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// mls for 4 ints (T4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2 (fused, 2 floats, T2S).
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2 (fused, 4 floats, T4S).
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2 (fused, 2 doubles, T2D).
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16928 
16929 // --------------- Vector Multiply-Add Shorts into Integer --------------------
16930 
// MulAddVS2VI: widening multiply of adjacent short pairs summed into ints.
// Lower and upper halves of the short vectors are widened/multiplied
// separately (the two smullv calls — T4H takes the low half, T8H the high
// half), then addpv pairwise-adds the two 4S results into dst.
// NOTE(review): the last format line ends with a stray "\n\t" before %} —
// cosmetic only (format strings just feed the debug disassembly).
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  // dst is written before src1/src2 are dead, hence TEMP_DEF.
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16952 
16953 // --------------------------------- DIV --------------------------------------
16954 
// FP vector divide (fdiv). Only FP variants exist — NEON has no integer
// vector divide.

// DivVF, 2 floats (T2S).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// DivVF, 4 floats (T4S).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// DivVD, 2 doubles (T2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16996 
16997 // --------------------------------- SQRT -------------------------------------
16998 
// FP vector square root (fsqrt).

// SqrtVF, 2 floats (T2S).
// NOTE(review): uses pipe vunop_fp64 while the 128-bit variants use
// vsqrt_fp128 — confirm this asymmetry is the intended pipeline model.
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// SqrtVF, 4 floats (T4S).
instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

// SqrtVD, 2 doubles (T2D).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17032 
17033 // --------------------------------- ABS --------------------------------------
17034 
// Vector absolute value. Integer variants encode the NEON abs instruction
// (absr in the assembler), FP variants encode fabs.

// AbsVB, 4 or 8 bytes (T8B).
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AbsVB, 16 bytes (T16B).
instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVS, 4 shorts (T4H).
// NOTE(review): sibling 4H ops (vadd4S, vmul4S, ...) also accept
// length()==2; this predicate only covers length 4 — confirm 2-short
// AbsVS vectors are handled elsewhere or cannot occur.
instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AbsVS, 8 shorts (T8H).
instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVI, 2 ints (T2S).
instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AbsVI, 4 ints (T4S).
instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVL, 2 longs (T2D).
instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVF, 2 floats (fabs, T2S).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// AbsVF, 4 floats (fabs, T4S).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// AbsVD, 2 doubles (fabs, T2D).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17158 
17159 // --------------------------------- NEG --------------------------------------
17160 
// FP vector negate (fneg). Only FP variants are defined here.

// NegVF, 2 floats (T2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// NegVF, 4 floats (T4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// NegVD, 2 doubles (T2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17199 
17200 // --------------------------------- AND --------------------------------------
17201 
// Bitwise AND. Logical ops are element-type agnostic, so the predicate
// dispatches on length_in_bytes rather than element count.

// AndV, 4 or 8 bytes total (T8B).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AndV, 16 bytes total (T16B).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17230 
17231 // --------------------------------- OR ---------------------------------------
17232 
// Vector bitwise OR, 64-bit vectors. The predicate also accepts 4-byte
// vectors; the T8B encoding covers that case as well.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed: the format string previously printed "and" although the
  // encoded instruction is orr (copy/paste from vand8B).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17247 
// Vector bitwise OR, 128-bit vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17261 
17262 // --------------------------------- XOR --------------------------------------
17263 
// Vector bitwise XOR (NEON eor), 64-bit vectors. The predicate also
// accepts 4-byte vectors; the T8B encoding covers that case as well.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Vector bitwise XOR (NEON eor), 128-bit vectors.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17292 
17293 // ------------------------------ Shift ---------------------------------------
// Replicate a scalar shift count into every byte lane of a 64-bit vector
// (dup), producing the count operand used by the variable shifts below.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 128-bit variant of the shift count replication above.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17315 
// Variable left shift of byte elements, 64-bit vectors (sshl with a
// positive per-element count). Predicate also accepts 4-byte vectors.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left shift of byte elements, 128-bit vectors.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17342 
17343 // Right shifts with vector shift count on aarch64 SIMD are implemented
17344 // as left shift by negative shift count.
17345 // There are two cases for vector shift count.
17346 //
17347 // Case 1: The vector shift count is from replication.
17348 //        |            |
17349 //    LoadVector  RShiftCntV
17350 //        |       /
17351 //     RShiftVI
// Note: In the inner loop, multiple neg instructions are used, which can be
// moved to the outer loop and merged into one neg instruction.
17354 //
17355 // Case 2: The vector shift count is from loading.
17356 // This case isn't supported by middle-end now. But it's supported by
17357 // panama/vectorIntrinsics(JEP 338: Vector API).
17358 //        |            |
17359 //    LoadVector  LoadVector
17360 //        |       /
17361 //     RShiftVI
17362 //
17363 
// Arithmetic right shift of byte elements with a variable (vector) count:
// negate the count (negr) and do a signed left shift (sshl), as described
// in the comment block above.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the arithmetic right shift above.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift of byte elements with a variable count: negate the
// count and do an unsigned left shift (ushl).
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the logical right shift above.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17433 
// Left shift of byte elements by an immediate. A count >= the element
// width (8) shifts out every bit, so the result is materialized as zero
// (eor of src with itself); shl cannot encode such a count.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate left shift above.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift of byte elements by an immediate. Counts >= 8
// are clamped to 7: sshr by (width - 1) fills every bit with the sign,
// the same result as any larger arithmetic shift.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate arithmetic right shift above.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift of byte elements by an immediate. Like the left
// shift, a count >= 8 yields zero (eor of src with itself).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate logical right shift above.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17540 
// Variable left shift of short (halfword) elements, 64-bit vectors.
// Predicate also accepts 2-element vectors.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the variable left shift above.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift of short elements with a variable count:
// negate the count, then signed left shift. The negr is bytewise (T8B);
// sshl reads only the least-significant byte of each element as its
// shift amount, and the count is replicated into every byte lane.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the variable arithmetic right shift above.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift of short elements with a variable count:
// negate the count, then unsigned left shift (ushl).
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the variable logical right shift above.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17637 
// Left shift of short elements by an immediate. Counts >= the element
// width (16) shift out every bit, so the result is zeroed with eor;
// shl cannot encode such a count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate left shift above.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift of short elements by an immediate. Counts >= 16
// are clamped to 15: sshr by (width - 1) fills every bit with the sign.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate arithmetic right shift above.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift of short elements by an immediate. Counts >= 16
// yield zero (eor of src with itself).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate logical right shift above.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17744 
// Variable left shift of int elements, 64-bit vectors.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the variable left shift above.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift of int elements with a variable count:
// negate the count (bytewise) and do a signed left shift.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the variable arithmetic right shift above.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift of int elements with a variable count:
// negate the count and do an unsigned left shift.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of the variable logical right shift above.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17838 
// Immediate shifts of int elements.
// NOTE(review): unlike the byte/short immediate shifts above, there is no
// out-of-range handling here — presumably the shift constant is already
// in [0, 31] for 32-bit elements when these rules match; confirm against
// the middle-end's shift-count normalization.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of the immediate left shift above.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift, 2 int elements.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift, 4 int elements.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift, 2 int elements.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift, 4 int elements.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17916 
// Variable left shift of long elements (2D, 128-bit only).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift of long elements with a variable count:
// negate the count (bytewise) and do a signed left shift.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift of long elements with a variable count:
// negate the count and do an unsigned left shift.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Immediate shifts of long elements. As with the int immediate shifts,
// no out-of-range handling is done here.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift, 2 long elements.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift, 2 long elements.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18002 
// Element-wise FP maximum (NEON fmax): 2 floats, 64-bit vector.
// The element_basic_type check distinguishes the FP MaxV from any
// integer variants.
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Element-wise FP maximum: 4 floats, 128-bit vector.
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Element-wise FP maximum: 2 doubles, 128-bit vector.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18044 
// Vector min, 2 float lanes (64-bit vector register).
// Matches the MinV ideal node for 2 x T_FLOAT and emits a single
// NEON FMIN with the 2S arrangement.
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18058 
// Vector min, 4 float lanes (128-bit vector register).
// Matches the MinV ideal node for 4 x T_FLOAT and emits a single
// NEON FMIN with the 4S arrangement.
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  // "(4F)" for consistency with the 2F/2D float min/max rules; the
  // previous "(4S)" used the integer-lane naming.
  format %{ "fmin  $dst,$src1,$src2\t# vector (4F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18072 
// Vector min, 2 double lanes (128-bit vector register).
// Matches the MinV ideal node for 2 x T_DOUBLE and emits a single
// NEON FMIN with the 2D arrangement.
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18086 
// Vector round-to-integral, 2 double lanes (128-bit vector register).
// Matches RoundDoubleModeV and selects the NEON FRINT variant from the
// compile-time rounding-mode constant:
//   rmode_rint  -> frintn (round to nearest, ties to even)
//   rmode_floor -> frintm (round toward minus infinity)
//   rmode_ceil  -> frintp (round toward plus infinity)
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      default:
        // An unknown rounding mode would previously fall through and
        // emit no instruction at all; fail loudly instead.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(vdop_fp128);
%}
18109 
// Vector population count, 4 int lanes (128-bit vector register).
// Matches PopCountVI. AArch64 CNT only counts bits per byte, so the
// per-byte counts are widened to per-int counts with two pairwise
// widening adds:
//   cnt    16B -> bit count in each byte
//   uaddlp 16B -> sums adjacent byte pairs into 8 halfwords
//   uaddlp 8H  -> sums adjacent halfword pairs into 4 words
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18128 
// Vector population count, 2 int lanes (64-bit vector register).
// Same cnt + double-uaddlp widening scheme as vpopcount4I, using the
// 64-bit arrangements:
//   cnt    8B -> bit count in each byte
//   uaddlp 8B -> sums adjacent byte pairs into 4 halfwords
//   uaddlp 4H -> sums adjacent halfword pairs into 2 words
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18147 
18148 //----------PEEPHOLE RULES-----------------------------------------------------
18149 // These must follow all instruction definitions as they use the names
18150 // defined in the instructions definitions.
18151 //
18152 // peepmatch ( root_instr_name [preceding_instruction]* );
18153 //
18154 // peepconstraint %{
18155 // (instruction_number.operand_name relational_op instruction_number.operand_name
18156 //  [, ...] );
18157 // // instruction numbers are zero-based using left to right order in peepmatch
18158 //
18159 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18160 // // provide an instruction_number.operand_name for each operand that appears
18161 // // in the replacement instruction's match rule
18162 //
18163 // ---------VM FLAGS---------------------------------------------------------
18164 //
18165 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18166 //
18167 // Each peephole rule is given an identifying number starting with zero and
18168 // increasing by one in the order seen by the parser.  An individual peephole
18169 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18170 // on the command-line.
18171 //
18172 // ---------CURRENT LIMITATIONS----------------------------------------------
18173 //
18174 // Only match adjacent instructions in same basic block
18175 // Only equality constraints
18176 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18177 // Only one replacement instruction
18178 //
18179 // ---------EXAMPLE----------------------------------------------------------
18180 //
18181 // // pertinent parts of existing instructions in architecture description
18182 // instruct movI(iRegINoSp dst, iRegI src)
18183 // %{
18184 //   match(Set dst (CopyI src));
18185 // %}
18186 //
18187 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18188 // %{
18189 //   match(Set dst (AddI dst src));
18190 //   effect(KILL cr);
18191 // %}
18192 //
18193 // // Change (inc mov) to lea
18194 // peephole %{
//   // increment preceded by register-register move
18196 //   peepmatch ( incI_iReg movI );
18197 //   // require that the destination register of the increment
18198 //   // match the destination register of the move
18199 //   peepconstraint ( 0.dst == 1.dst );
18200 //   // construct a replacement instruction that sets
18201 //   // the destination to ( move's source register + one )
18202 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18203 // %}
18204 //
18205 
18206 // Implementation no longer uses movX instructions since
18207 // machine-independent system no longer uses CopyX nodes.
18208 //
18209 // peephole
18210 // %{
18211 //   peepmatch (incI_iReg movI);
18212 //   peepconstraint (0.dst == 1.dst);
18213 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18214 // %}
18215 
18216 // peephole
18217 // %{
18218 //   peepmatch (decI_iReg movI);
18219 //   peepconstraint (0.dst == 1.dst);
18220 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18221 // %}
18222 
18223 // peephole
18224 // %{
18225 //   peepmatch (addI_iReg_imm movI);
18226 //   peepconstraint (0.dst == 1.dst);
18227 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18228 // %}
18229 
18230 // peephole
18231 // %{
18232 //   peepmatch (incL_iReg movL);
18233 //   peepconstraint (0.dst == 1.dst);
18234 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18235 // %}
18236 
18237 // peephole
18238 // %{
18239 //   peepmatch (decL_iReg movL);
18240 //   peepconstraint (0.dst == 1.dst);
18241 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18242 // %}
18243 
18244 // peephole
18245 // %{
18246 //   peepmatch (addL_iReg_imm movL);
18247 //   peepconstraint (0.dst == 1.dst);
18248 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18249 // %}
18250 
18251 // peephole
18252 // %{
18253 //   peepmatch (addP_iReg_imm movP);
18254 //   peepconstraint (0.dst == 1.dst);
18255 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18256 // %}
18257 
18258 // // Change load of spilled value to only a spill
18259 // instruct storeI(memory mem, iRegI src)
18260 // %{
18261 //   match(Set mem (StoreI mem src));
18262 // %}
18263 //
18264 // instruct loadI(iRegINoSp dst, memory mem)
18265 // %{
18266 //   match(Set dst (LoadI mem));
18267 // %}
18268 //
18269 
18270 //----------SMARTSPILL RULES---------------------------------------------------
18271 // These must follow all instruction definitions as they use the names
18272 // defined in the instructions definitions.
18273 
18274 // Local Variables:
18275 // mode: c++
18276 // End: