1 //
   2 // Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point/SIMD registers. Each is 128 bits wide
// and can hold a vector of up to 4 * 32 bit or 2 * 64 bit
// floating-point values.  We currently only use the first float or
// double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee-save).  Float
// registers v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 condition flags (the NZCV fields of PSTATE, aka CPSR)
// are not directly accessible as an instruction operand. The FPSR
// status flag register is a system register which can be written/read
// using MSR/MRS but again does not appear as an operand (a code
// identifying the FPSR occurs as an immediate value in the
// instruction).
 333 
// Condition flags: no addressable machine register, so the VMReg slot
// is VMRegImpl::Bad(); encoding 32 is one past the last GP register.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order: scratch/volatile registers first,
// then the Java argument registers, then callee-saved registers, with
// the non-allocatable system registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Float/SIMD register allocation order: the no-save v16-v31 first,
// then the v0-v7 argument registers, then v8-v15 (callee-save under
// the platform ABI) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The condition flags live in their own allocation chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register.  Includes FP (R29) and LR
// (R30); the no_special_* classes below exclude the system registers.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes, each pinning a 32-bit int value to one specific
// register -- presumably for match rules that require a fixed
// register operand (TODO: confirm against their uses).

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers, including SP (R31).
// Pairs Rn, Rn_H because long values span both 32-bit halves.
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers, excluding the frame
// pointer R29 (variant used when PreserveFramePointer reserves R29;
// see reg_class_dynamic no_special_reg32 below).
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// As no_special_reg32_no_fp but with the frame pointer R29 allocatable
// (variant used when PreserveFramePointer is off).
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
// Equivalent to: PreserveFramePointer ? no_special_reg32_no_fp
//                                     : no_special_reg32_with_fp
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers, excluding the
// frame pointer R29 (used when PreserveFramePointer reserves R29).
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// As no_special_reg_no_fp but with the frame pointer R29 allocatable
// (used when PreserveFramePointer is off).
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
// Equivalent to: PreserveFramePointer ? no_special_reg_no_fp
//                                     : no_special_reg_with_fp
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton classes: each pins a 64-bit value to one specific
// register, for calling conventions and match rules that require a
// fixed register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (R0-R31 inclusive; pairs Rn, Rn_H
// because pointers are 64 bits wide).
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers: ptr_reg minus the
// system registers (heapbase, thread, fp, lr, sp).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers: only the low 32-bit slot Vn of each
// FP/SIMD register is named (single-precision values).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: each entry pairs the real low slot
// Vn with its virtual high half Vn_H.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers: the low two 32-bit slots
// (Vn, Vn_H) of each FP/SIMD register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (all four 32-bit slots per register: Vn, Vn_H, Vn_J, Vn_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Singleton register classes used to pin an operand to one specific
// SIMD/FP register (v0 .. v31).  n.b. each class lists only the first
// two slots (Vn, Vn_H) even though the per-class comments say `128
// bit'; presumably the matcher identifies the register by its low
// slots -- TODO(review): confirm the _J/_K slots need not be listed.
// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
1099 
// Singleton class for condition codes
// (the flags register; operands of compare/branch rules live here)
reg_class int_flags(RFLAGS);
1102 
1103 %}
1104 
1105 //----------DEFINITION BLOCK---------------------------------------------------
1106 // Define name --> value mappings to inform the ADLC of an integer valued name
1107 // Current support includes integer values in the range [0, 0x7FFFFFFF]
1108 // Format:
1109 //        int_def  <name>         ( <int_value>, <expression>);
1110 // Generated Code in ad_<arch>.hpp
1111 //        #define  <name>   (<expression>)
1112 //        // value == <int_value>
1113 // Generated code in ad_<arch>.cpp adlc_verification()
1114 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
1115 //
1116 
1117 // we follow the ppc-aix port in using a simple cost model which ranks
1118 // register operations as cheap, memory ops as more expensive and
1119 // branches as most expensive. the first two have a low as well as a
1120 // normal cost. huge cost appears to be a way of saying don't do
1121 // something
1122 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked at twice the cost of a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references require barriers and are costed much higher.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
1130 
1131 
1132 //----------SOURCE BLOCK-------------------------------------------------------
1133 // This is a block of C++ code which provides values, functions, and
1134 // definitions necessary in the rest of the architecture description
1135 
1136 source_hpp %{
1137 
1138 #include "asm/macroAssembler.hpp"
1139 #include "gc/shared/cardTable.hpp"
1140 #include "gc/shared/cardTableBarrierSet.hpp"
1141 #include "gc/shared/collectedHeap.hpp"
1142 #include "opto/addnode.hpp"
1143 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // Both queries return 0: no call trampoline stubs are emitted on
  // this platform.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1161 
// Sizing/emission hooks for the exception and deopt handler stubs
// planted at the end of each compiled method.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is just a far branch to the shared handler
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction; a far branch may
    // need up to 3 instructions, giving 4 words in total --
    // TODO(review): confirm against MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
1178 
 // predicate shared by the rules below: does this opcode denote a
 // CAS-style LoadStore (see the definition in the source block)?
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1200 %}
1201 
1202 source %{
1203 
  // Optimization of volatile gets and puts
1205   // -------------------------------------
1206   //
1207   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1208   // use to implement volatile reads and writes. For a volatile read
1209   // we simply need
1210   //
1211   //   ldar<x>
1212   //
1213   // and for a volatile write we need
1214   //
1215   //   stlr<x>
1216   //
1217   // Alternatively, we can implement them by pairing a normal
1218   // load/store with a memory barrier. For a volatile read we need
1219   //
1220   //   ldr<x>
1221   //   dmb ishld
1222   //
1223   // for a volatile write
1224   //
1225   //   dmb ish
1226   //   str<x>
1227   //   dmb ish
1228   //
1229   // We can also use ldaxr and stlxr to implement compare and swap CAS
1230   // sequences. These are normally translated to an instruction
1231   // sequence like the following
1232   //
1233   //   dmb      ish
1234   // retry:
1235   //   ldxr<x>   rval raddr
1236   //   cmp       rval rold
1237   //   b.ne done
1238   //   stlxr<x>  rval, rnew, rold
1239   //   cbnz      rval retry
1240   // done:
1241   //   cset      r0, eq
1242   //   dmb ishld
1243   //
1244   // Note that the exclusive store is already using an stlxr
1245   // instruction. That is required to ensure visibility to other
1246   // threads of the exclusive write (assuming it succeeds) before that
1247   // of any subsequent writes.
1248   //
1249   // The following instruction sequence is an improvement on the above
1250   //
1251   // retry:
1252   //   ldaxr<x>  rval raddr
1253   //   cmp       rval rold
1254   //   b.ne done
1255   //   stlxr<x>  rval, rnew, rold
1256   //   cbnz      rval retry
1257   // done:
1258   //   cset      r0, eq
1259   //
1260   // We don't need the leading dmb ish since the stlxr guarantees
1261   // visibility of prior writes in the case that the swap is
1262   // successful. Crucially we don't have to worry about the case where
1263   // the swap is not successful since no valid program should be
1264   // relying on visibility of prior changes by the attempting thread
1265   // in the case where the CAS fails.
1266   //
1267   // Similarly, we don't need the trailing dmb ishld if we substitute
1268   // an ldaxr instruction since that will provide all the guarantees we
1269   // require regarding observation of changes made by other threads
1270   // before any change to the CAS address observed by the load.
1271   //
1272   // In order to generate the desired instruction sequence we need to
1273   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
  // writes or CAS operations and ii) do not occur through any other
  // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1278   // sequences to the desired machine code sequences. Selection of the
1279   // alternative rules can be implemented by predicates which identify
1280   // the relevant node sequences.
1281   //
1282   // The ideal graph generator translates a volatile read to the node
1283   // sequence
1284   //
1285   //   LoadX[mo_acquire]
1286   //   MemBarAcquire
1287   //
1288   // As a special case when using the compressed oops optimization we
1289   // may also see this variant
1290   //
1291   //   LoadN[mo_acquire]
1292   //   DecodeN
1293   //   MemBarAcquire
1294   //
1295   // A volatile write is translated to the node sequence
1296   //
1297   //   MemBarRelease
1298   //   StoreX[mo_release] {CardMark}-optional
1299   //   MemBarVolatile
1300   //
1301   // n.b. the above node patterns are generated with a strict
1302   // 'signature' configuration of input and output dependencies (see
1303   // the predicates below for exact details). The card mark may be as
1304   // simple as a few extra nodes or, in a few GC configurations, may
1305   // include more complex control flow between the leading and
1306   // trailing memory barriers. However, whatever the card mark
1307   // configuration these signatures are unique to translated volatile
1308   // reads/stores -- they will not appear as a result of any other
1309   // bytecode translation or inlining nor as a consequence of
1310   // optimizing transforms.
1311   //
1312   // We also want to catch inlined unsafe volatile gets and puts and
1313   // be able to implement them using either ldar<x>/stlr<x> or some
1314   // combination of ldr<x>/stlr<x> and dmb instructions.
1315   //
1316   // Inlined unsafe volatiles puts manifest as a minor variant of the
1317   // normal volatile put node sequence containing an extra cpuorder
1318   // membar
1319   //
1320   //   MemBarRelease
1321   //   MemBarCPUOrder
1322   //   StoreX[mo_release] {CardMark}-optional
1323   //   MemBarCPUOrder
1324   //   MemBarVolatile
1325   //
1326   // n.b. as an aside, a cpuorder membar is not itself subject to
1327   // matching and translation by adlc rules.  However, the rule
1328   // predicates need to detect its presence in order to correctly
1329   // select the desired adlc rules.
1330   //
1331   // Inlined unsafe volatile gets manifest as a slightly different
1332   // node sequence to a normal volatile get because of the
1333   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1336   // present
1337   //
1338   //   MemBarCPUOrder
1339   //        ||       \\
1340   //   MemBarCPUOrder LoadX[mo_acquire]
1341   //        ||            |
1342   //        ||       {DecodeN} optional
1343   //        ||       /
1344   //     MemBarAcquire
1345   //
1346   // In this case the acquire membar does not directly depend on the
1347   // load. However, we can be sure that the load is generated from an
1348   // inlined unsafe volatile get if we see it dependent on this unique
1349   // sequence of membar nodes. Similarly, given an acquire membar we
1350   // can know that it was added because of an inlined unsafe volatile
1351   // get if it is fed and feeds a cpuorder membar and if its feed
1352   // membar also feeds an acquiring load.
1353   //
1354   // Finally an inlined (Unsafe) CAS operation is translated to the
1355   // following ideal graph
1356   //
1357   //   MemBarRelease
1358   //   MemBarCPUOrder
1359   //   CompareAndSwapX {CardMark}-optional
1360   //   MemBarCPUOrder
1361   //   MemBarAcquire
1362   //
1363   // So, where we can identify these volatile read and write
1364   // signatures we can choose to plant either of the above two code
1365   // sequences. For a volatile read we can simply plant a normal
1366   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1367   // also choose to inhibit translation of the MemBarAcquire and
1368   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1369   //
1370   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1372   // normal str<x> and then a dmb ish for the MemBarVolatile.
1373   // Alternatively, we can inhibit translation of the MemBarRelease
1374   // and MemBarVolatile and instead plant a simple stlr<x>
1375   // instruction.
1376   //
1377   // when we recognise a CAS signature we can choose to plant a dmb
1378   // ish as a translation for the MemBarRelease, the conventional
1379   // macro-instruction sequence for the CompareAndSwap node (which
1380   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1381   // Alternatively, we can elide generation of the dmb instructions
1382   // and plant the alternative CompareAndSwap macro-instruction
1383   // sequence (which uses ldaxr<x>).
1384   //
1385   // Of course, the above only applies when we see these signature
1386   // configurations. We still want to plant dmb instructions in any
1387   // other cases where we may see a MemBarAcquire, MemBarRelease or
1388   // MemBarVolatile. For example, at the end of a constructor which
1389   // writes final/volatile fields we will see a MemBarRelease
1390   // instruction and this needs a 'dmb ish' lest we risk the
1391   // constructed object being visible without making the
1392   // final/volatile field writes visible.
1393   //
1394   // n.b. the translation rules below which rely on detection of the
1395   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1396   // If we see anything other than the signature configurations we
1397   // always just translate the loads and stores to ldr<x> and str<x>
1398   // and translate acquire, release and volatile membars to the
1399   // relevant dmb instructions.
1400   //
1401 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is always treated as a CAS-style operation;
  // for the CompareAndExchange/WeakCompareAndSwap variants the result
  // is maybe_volatile; otherwise false.
1406 
  // Classify opcode as a CAS-style LoadStore.  The first group
  // (CompareAndSwap / GetAndSet / GetAndAdd) is always treated as a
  // CAS; the second group (CompareAndExchange / WeakCompareAndSwap)
  // only when the caller allows a possibly-volatile context.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These only count as CAS in a possibly-volatile context
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
1447 
1448   // helper to determine the maximum number of Phi nodes we may need to
1449   // traverse when searching from a card mark membar for the merge mem
1450   // feeding a trailing membar or vice versa
1451 
1452 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1453 
// Returns true when the acquire membar is redundant because the
// matching load/CAS will be emitted with built-in acquire semantics
// (ldar<x>/ldaxr<x>).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // the trailing membar of a volatile load sequence is redundant:
  // the load is emitted as ldar<x>
  if (mb->trailing_load()) {
    return true;
  }

  // a trailing membar of a load-store sequence is redundant only when
  // the load-store really is one of the CAS opcodes
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1477 
1478 bool needs_acquiring_load(const Node *n)
1479 {
1480   assert(n->is_Load(), "expecting a load");
1481   if (UseBarriersForVolatile) {
1482     // we use a normal load and a dmb
1483     return false;
1484   }
1485 
1486   LoadNode *ld = n->as_Load();
1487 
1488   return ld->is_acquire();
1489 }
1490 
1491 bool unnecessary_release(const Node *n)
1492 {
1493   assert((n->is_MemBar() &&
1494           n->Opcode() == Op_MemBarRelease),
1495          "expecting a release membar");
1496 
1497   if (UseBarriersForVolatile) {
1498     // we need to plant a dmb
1499     return false;
1500   }
1501 
1502   MemBarNode *barrier = n->as_MemBar();
1503   if (!barrier->leading()) {
1504     return false;
1505   } else {
1506     Node* trailing = barrier->trailing_membar();
1507     MemBarNode* trailing_mb = trailing->as_MemBar();
1508     assert(trailing_mb->trailing(), "Not a trailing membar?");
1509     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1510 
1511     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1512     if (mem->is_Store()) {
1513       assert(mem->as_Store()->is_release(), "");
1514       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1515       return true;
1516     } else {
1517       assert(mem->is_LoadStore(), "");
1518       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1519       return is_CAS(mem->Opcode(), true);
1520     }
1521   }
1522   return false;
1523 }
1524 
1525 bool unnecessary_volatile(const Node *n)
1526 {
1527   // assert n->is_MemBar();
1528   if (UseBarriersForVolatile) {
1529     // we need to plant a dmb
1530     return false;
1531   }
1532 
1533   MemBarNode *mbvol = n->as_MemBar();
1534 
1535   bool release = mbvol->trailing_store();
1536   assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
1537 #ifdef ASSERT
1538   if (release) {
1539     Node* leading = mbvol->leading_membar();
1540     assert(leading->Opcode() == Op_MemBarRelease, "");
1541     assert(leading->as_MemBar()->leading_store(), "");
1542     assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
1543   }
1544 #endif
1545 
1546   return release;
1547 }
1548 
1549 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1550 
1551 bool needs_releasing_store(const Node *n)
1552 {
1553   // assert n->is_Store();
1554   if (UseBarriersForVolatile) {
1555     // we use a normal store and dmb combination
1556     return false;
1557   }
1558 
1559   StoreNode *st = n->as_Store();
1560 
1561   return st->trailing_membar() != NULL;
1562 }
1563 
1564 // predicate controlling translation of CAS
1565 //
1566 // returns true if CAS needs to use an acquiring load otherwise false
1567 
// Returns true when the CAS should use an ldaxr<x> (acquiring
// exclusive load) rather than a plain ldxr<x>.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // barriers are planted explicitly, so a plain ldxr<x> suffices
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // unconditional CAS-style ops always carry a trailing membar
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // CompareAndExchange/WeakCompareAndSwap variants only need the
    // acquiring load when they are part of a volatile signature,
    // i.e. when a trailing membar is present
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1585 
// predicate controlling translation of StoreCM
//
// returns true if the StoreStore barrier normally planted before the
// card write is unnecessary and can be elided, otherwise false
1590 
1591 bool unnecessary_storestore(const Node *storecm)
1592 {
1593   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1594 
1595   // we need to generate a dmb ishst between an object put and the
1596   // associated card mark when we are using CMS without conditional
1597   // card marking
1598 
1599   if (UseConcMarkSweepGC && !UseCondCardMark) {
1600     return false;
1601   }
1602 
1603   // a storestore is unnecesary in all other cases
1604 
1605   return true;
1606 }
1607 
1608 
1609 #define __ _masm.
1610 
1611 // advance declarations for helper functions to convert register
1612 // indices to register objects
1613 
1614 // the ad file has to provide implementations of certain methods
1615 // expected by the generic code
1616 //
1617 // REQUIRED FUNCTIONALITY
1618 
1619 //=============================================================================
1620 
1621 // !!!!! Special hack to get all types of calls to specify the byte offset
1622 //       from the start of the call to the point where the return address
1623 //       will point.
1624 
1625 int MachCallStaticJavaNode::ret_addr_offset()
1626 {
1627   // call should be a simple bl
1628   int off = 4;
1629   return off;
1630 }
1631 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // the inline cache value is materialized with movz, movk, movk,
  // then the call is a bl: four 4-byte instructions
  return 16; // movz, movk, movk, bl
}
1636 
1637 int MachCallRuntimeNode::ret_addr_offset() {
1638   // for generated stubs the call will be
1639   //   far_call(addr)
1640   // for real runtime callouts it will be six instructions
1641   // see aarch64_enc_java_to_runtime
1642   //   adr(rscratch2, retaddr)
1643   //   lea(rscratch1, RuntimeAddress(addr)
1644   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1645   //   blr(rscratch1)
1646   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1647   if (cb) {
1648     return MacroAssembler::far_branch_size();
1649   } else {
1650     return 6 * NativeInstruction::instruction_size;
1651   }
1652 }
1653 
1654 // Indicate if the safepoint node needs the polling page as an input
1655 
1656 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1658 // instruction itself. so we cannot plant a mov of the safepoint poll
1659 // address followed by a load. setting this to true means the mov is
1660 // scheduled as a prior instruction. that's better for scheduling
1661 // anyway.
1662 
bool SafePointNode::needs_polling_address_input()
{
  // true so that the mov of the polling page address becomes a
  // separate, earlier-scheduled instruction and the oop data can sit
  // at the poll load itself
  return true;
}
1667 
1668 //=============================================================================
1669 
1670 #ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // debug listing only: printed form of the breakpoint pseudo-op
  st->print("BREAKPOINT");
}
1674 #endif
1675 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // brk #0 traps to the debugger
  __ brk(0);
}
1680 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // determined generically from the emitted code
  return MachNode::size(ra_);
}
1684 
1685 //=============================================================================
1686 
1687 #ifndef PRODUCT
  // debug listing only: show how many pad bytes this nop run covers
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1691 #endif
1692 
1693   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1694     MacroAssembler _masm(&cbuf);
1695     for (int i = 0; i < _count; i++) {
1696       __ nop();
1697     }
1698   }
1699 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // each nop is one fixed-width instruction
    return _count * NativeInstruction::instruction_size;
  }
1703 
1704 //=============================================================================
1705 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1706 
int Compile::ConstantTable::calculate_table_base_offset() const {
  // the table base is addressed directly; no bias is applied
  return 0;  // absolute addressing, no offset
}
1710 
// this node never needs post-register-allocation expansion
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // unreachable: requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
1715 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding -- the node emits no instructions
}
1719 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // nothing is emitted for this node (see the empty emit above)
  return 0;
}
1723 
1724 #ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  // debug listing only
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1728 #endif
1729 
1730 #ifndef PRODUCT
1731 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1732   Compile* C = ra_->C;
1733 
1734   int framesize = C->frame_slots() << LogBytesPerInt;
1735 
1736   if (C->need_stack_bang(framesize))
1737     st->print("# stack bang size=%d\n\t", framesize);
1738 
1739   if (framesize < ((1 << 9) + 2 * wordSize)) {
1740     st->print("sub  sp, sp, #%d\n\t", framesize);
1741     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
1742     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
1743   } else {
1744     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
1745     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
1746     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
1747     st->print("sub  sp, sp, rscratch1");
1748   }
1749 }
1750 #endif
1751 
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // class initialization barrier: re-route to the wrong-method stub
  // if the holder class is still being initialized by another thread
  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // bang the stack before building the frame if the frame is large
  // enough to need it
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1794 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  // prolog length varies (stack bang, clinit barrier, frame shape)
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1800 
int MachPrologNode::reloc() const
{
  // the prolog records no relocations
  return 0;
}
1805 
1806 //=============================================================================
1807 
1808 #ifndef PRODUCT
// Debug listing of the epilog; mirrors MachEpilogNode::emit /
// remove_frame, with three shapes by frame size.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // just pop fp/lr
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: reload fp/lr then add the immediate back to sp
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: raise sp via a scratch register, then pop fp/lr
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
1832 #endif
1833 
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  // restore fp/lr and pop the frame
  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // safepoint poll on method return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1849 
1850 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1851   // Variable size. Determine dynamically.
1852   return MachNode::size(ra_);
1853 }
1854 
1855 int MachEpilogNode::reloc() const {
1856   // Return number of relocatable values contained in this instruction.
1857   return 1; // 1 for polling page.
1858 }
1859 
1860 const Pipeline * MachEpilogNode::pipeline() const {
1861   return MachNode::pipeline_class();
1862 }
1863 
1864 // This method seems to be obsolete. It is declared in machnode.hpp
1865 // and defined in all *.ad files, but it is never called. Should we
1866 // get rid of it?
1867 int MachEpilogNode::safepoint_offset() const {
1868   assert(do_polling(), "no return for this epilog node");
1869   return 4;
1870 }
1871 
1872 //=============================================================================
1873 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register number onto its spill-copy register class.
// The numeric thresholds mirror the reg_def order in the register block:
// integer registers first, then float registers, then flags, then stack.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float register * 2 halves
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1901 
1902 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1903   Compile* C = ra_->C;
1904 
1905   // Get registers to move.
1906   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1907   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1908   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1909   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1910 
1911   enum RC src_hi_rc = rc_class(src_hi);
1912   enum RC src_lo_rc = rc_class(src_lo);
1913   enum RC dst_hi_rc = rc_class(dst_hi);
1914   enum RC dst_lo_rc = rc_class(dst_lo);
1915 
1916   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1917 
1918   if (src_hi != OptoReg::Bad) {
1919     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1920            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1921            "expected aligned-adjacent pairs");
1922   }
1923 
1924   if (src_lo == dst_lo && src_hi == dst_hi) {
1925     return 0;            // Self copy, no move.
1926   }
1927 
1928   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1929               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1930   int src_offset = ra_->reg2offset(src_lo);
1931   int dst_offset = ra_->reg2offset(dst_lo);
1932 
1933   if (bottom_type()->isa_vect() != NULL) {
1934     uint ireg = ideal_reg();
1935     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1936     if (cbuf) {
1937       MacroAssembler _masm(cbuf);
1938       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1939       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1940         // stack->stack
1941         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1942         if (ireg == Op_VecD) {
1943           __ unspill(rscratch1, true, src_offset);
1944           __ spill(rscratch1, true, dst_offset);
1945         } else {
1946           __ spill_copy128(src_offset, dst_offset);
1947         }
1948       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1949         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1950                ireg == Op_VecD ? __ T8B : __ T16B,
1951                as_FloatRegister(Matcher::_regEncode[src_lo]));
1952       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1953         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1954                        ireg == Op_VecD ? __ D : __ Q,
1955                        ra_->reg2offset(dst_lo));
1956       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1957         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1958                        ireg == Op_VecD ? __ D : __ Q,
1959                        ra_->reg2offset(src_lo));
1960       } else {
1961         ShouldNotReachHere();
1962       }
1963     }
1964   } else if (cbuf) {
1965     MacroAssembler _masm(cbuf);
1966     switch (src_lo_rc) {
1967     case rc_int:
1968       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1969         if (is64) {
1970             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1971                    as_Register(Matcher::_regEncode[src_lo]));
1972         } else {
1973             MacroAssembler _masm(cbuf);
1974             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1975                     as_Register(Matcher::_regEncode[src_lo]));
1976         }
1977       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1978         if (is64) {
1979             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1980                      as_Register(Matcher::_regEncode[src_lo]));
1981         } else {
1982             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1983                      as_Register(Matcher::_regEncode[src_lo]));
1984         }
1985       } else {                    // gpr --> stack spill
1986         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1987         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1988       }
1989       break;
1990     case rc_float:
1991       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1992         if (is64) {
1993             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1994                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1995         } else {
1996             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1997                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1998         }
1999       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2000           if (cbuf) {
2001             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2002                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2003         } else {
2004             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2005                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2006         }
2007       } else {                    // fpr --> stack spill
2008         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2009         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2010                  is64 ? __ D : __ S, dst_offset);
2011       }
2012       break;
2013     case rc_stack:
2014       if (dst_lo_rc == rc_int) {  // stack --> gpr load
2015         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
2016       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2017         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2018                    is64 ? __ D : __ S, src_offset);
2019       } else {                    // stack --> stack copy
2020         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2021         __ unspill(rscratch1, is64, src_offset);
2022         __ spill(rscratch1, is64, dst_offset);
2023       }
2024       break;
2025     default:
2026       assert(false, "bad rc_class for spill");
2027       ShouldNotReachHere();
2028     }
2029   }
2030 
2031   if (st) {
2032     st->print("spill ");
2033     if (src_lo_rc == rc_stack) {
2034       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
2035     } else {
2036       st->print("%s -> ", Matcher::regName[src_lo]);
2037     }
2038     if (dst_lo_rc == rc_stack) {
2039       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
2040     } else {
2041       st->print("%s", Matcher::regName[dst_lo]);
2042     }
2043     if (bottom_type()->isa_vect() != NULL) {
2044       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
2045     } else {
2046       st->print("\t# spill size = %d", is64 ? 64:32);
2047     }
2048   }
2049 
2050   return 0;
2051 
2052 }
2053 
2054 #ifndef PRODUCT
2055 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2056   if (!ra_)
2057     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
2058   else
2059     implementation(NULL, ra_, false, st);
2060 }
2061 #endif
2062 
2063 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2064   implementation(&cbuf, ra_, false, NULL);
2065 }
2066 
2067 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
2068   return MachNode::size(ra_);
2069 }
2070 
2071 //=============================================================================
2072 
2073 #ifndef PRODUCT
2074 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2075   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2076   int reg = ra_->get_reg_first(this);
2077   st->print("add %s, rsp, #%d]\t# box lock",
2078             Matcher::regName[reg], offset);
2079 }
2080 #endif
2081 
// Materialize the address of the on-stack lock box: dst = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  // Stack offset of the box slot, and the encoding of the result register.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // Offsets that do not fit an add/sub immediate are not expected here;
    // BoxLockNode::size() below assumes exactly one 4-byte instruction.
    ShouldNotReachHere();
  }
}
2094 
2095 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2096   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2097   return 4;
2098 }
2099 
2100 //=============================================================================
2101 
2102 #ifndef PRODUCT
2103 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2104 {
2105   st->print_cr("# MachUEPNode");
2106   if (UseCompressedClassPointers) {
2107     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2108     if (CompressedKlassPointers::shift() != 0) {
2109       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2110     }
2111   } else {
2112    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2113   }
2114   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2115   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2116 }
2117 #endif
2118 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the receiver's klass (loaded from j_rarg0) with the inline
  // cache's expected klass; NOTE(review): rscratch2 presumably carries the
  // expected klass and rscratch1 is a temp — confirm against cmp_klass().
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: branch to the IC miss stub (out of near-branch range,
  // hence far_jump).
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2132 
2133 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
2134 {
2135   return MachNode::size(ra_);
2136 }
2137 
2138 // REQUIRED EMIT CODE
2139 
2140 //=============================================================================
2141 
2142 // Emit exception handler code.
// Emit the exception-handler stub for the method and return its offset
// within the stub section, or 0 if the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Jump to the shared exception blob; far_jump covers the full code cache.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2161 
2162 // Emit deopt handler code.
// Emit the deoptimization-handler stub and return its offset within the
// stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the current pc so the unpack blob can identify this site as
  // the return address, then jump to it.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2182 
2183 // REQUIRED MATCHER CODE
2184 
2185 //=============================================================================
2186 
2187 const bool Matcher::match_rule_supported(int opcode) {
2188   if (!has_match_rule(opcode))
2189     return false;
2190 
2191   bool ret_value = true;
2192   switch (opcode) {
2193     case Op_CacheWB:
2194     case Op_CacheWBPreSync:
2195     case Op_CacheWBPostSync:
2196       if (!VM_Version::supports_data_cache_line_flush()) {
2197         ret_value = false;
2198       }
2199       break;
2200   }
2201 
2202   return ret_value; // Per default match rules are supported.
2203 }
2204 
2205 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2206 
2207   // TODO
2208   // identify extra cases that we might want to provide match rules for
2209   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2210   bool ret_value = match_rule_supported(opcode);
2211   // Add rules here.
2212 
2213   return ret_value;  // Per default match rules are supported.
2214 }
2215 
2216 const bool Matcher::has_predicated_vectors(void) {
2217   return false;
2218 }
2219 
2220 const int Matcher::float_pressure(int default_pressure_threshold) {
2221   return default_pressure_threshold;
2222 }
2223 
2224 int Matcher::regnum_to_fpu_offset(int regnum)
2225 {
2226   Unimplemented();
2227   return 0;
2228 }
2229 
2230 // Is this branch offset short enough that a short branch can be used?
2231 //
2232 // NOTE: If the platform does not provide any short branch variants, then
2233 //       this method should return false for offset 0.
2234 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2235   // The passed offset is relative to address of the branch.
2236 
2237   return (-32768 <= offset && offset < 32768);
2238 }
2239 
2240 const bool Matcher::isSimpleConstant64(jlong value) {
2241   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
2242   // Probably always true, even if a temp register is required.
2243   return true;
2244 }
2245 
2246 // true just means we have fast l2f conversion
2247 const bool Matcher::convL2FSupported(void) {
2248   return true;
2249 }
2250 
// Vector width in bytes.  Returns 0 when the type cannot be vectorized
// under the current MaxVectorSize setting.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // Cap at 128-bit NEON registers, honoring a smaller MaxVectorSize flag.
  int size = MIN2(16,(int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
2260 
2261 // Limits on vector size (number of elements) loaded into vector.
2262 const int Matcher::max_vector_size(const BasicType bt) {
2263   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2264 }
2265 const int Matcher::min_vector_size(const BasicType bt) {
2266 //  For the moment limit the vector size to 8 bytes
2267     int size = 8 / type2aelembytes(bt);
2268     if (size < 2) size = 2;
2269     return size;
2270 }
2271 
2272 // Vector ideal reg.
2273 const uint Matcher::vector_ideal_reg(int len) {
2274   switch(len) {
2275     case  8: return Op_VecD;
2276     case 16: return Op_VecX;
2277   }
2278   ShouldNotReachHere();
2279   return 0;
2280 }
2281 
2282 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2283   switch(size) {
2284     case  8: return Op_VecD;
2285     case 16: return Op_VecX;
2286   }
2287   ShouldNotReachHere();
2288   return 0;
2289 }
2290 
2291 // AES support not yet implemented
2292 const bool Matcher::pass_original_key_for_aes() {
2293   return false;
2294 }
2295 
2296 // aarch64 supports misaligned vectors store/load.
2297 const bool Matcher::misaligned_vectors_ok() {
2298   return true;
2299 }
2300 
2301 // false => size gets scaled to BytesPerLong, ok.
2302 const bool Matcher::init_array_count_is_in_bytes = false;
2303 
2304 // Use conditional move (CMOVL)
2305 const int Matcher::long_cmove_cost() {
2306   // long cmoves are no more expensive than int cmoves
2307   return 0;
2308 }
2309 
2310 const int Matcher::float_cmove_cost() {
2311   // float cmoves are no more expensive than int cmoves
2312   return 0;
2313 }
2314 
2315 // Does the CPU require late expand (see block.cpp for description of late expand)?
2316 const bool Matcher::require_postalloc_expand = false;
2317 
2318 // Do we need to mask the count passed to shift instructions or does
2319 // the cpu only look at the lower 5/6 bits anyway?
2320 const bool Matcher::need_masked_shift_count = false;
2321 
2322 // This affects two different things:
2323 //  - how Decode nodes are matched
2324 //  - how ImplicitNullCheck opportunities are recognized
2325 // If true, the matcher will try to remove all Decodes and match them
2326 // (as operands) into nodes. NullChecks are not prepared to deal with
2327 // Decodes by final_graph_reshaping().
2328 // If false, final_graph_reshaping() forces the decode behind the Cmp
2329 // for a NullCheck. The matcher matches the Decode node into a register.
2330 // Implicit_null_check optimization moves the Decode along with the
2331 // memory operation back up before the NullCheck.
2332 bool Matcher::narrow_oop_use_complex_address() {
2333   return CompressedOops::shift() == 0;
2334 }
2335 
2336 bool Matcher::narrow_klass_use_complex_address() {
2337 // TODO
2338 // decide whether we need to set this to true
2339   return false;
2340 }
2341 
2342 bool Matcher::const_oop_prefer_decode() {
2343   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
2344   return CompressedOops::base() == NULL;
2345 }
2346 
2347 bool Matcher::const_klass_prefer_decode() {
2348   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
2349   return CompressedKlassPointers::base() == NULL;
2350 }
2351 
2352 // Is it better to copy float constants, or load them directly from
2353 // memory?  Intel can load a float constant from a direct address,
2354 // requiring no extra registers.  Most RISCs will have to materialize
2355 // an address into a register first, so they would do better to copy
2356 // the constant from stack.
2357 const bool Matcher::rematerialize_float_constants = false;
2358 
2359 // If CPU can load and store mis-aligned doubles directly then no
2360 // fixup is needed.  Else we split the double into 2 integer pieces
2361 // and move it piece-by-piece.  Only happens when passing doubles into
2362 // C code as the Java calling convention forces doubles to be aligned.
2363 const bool Matcher::misaligned_doubles_ok = true;
2364 
// Not used on aarch64: any call aborts via Unimplemented().  (The previous
// comment said "No-op on amd64", copied from another port.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2369 
2370 // Advertise here if the CPU requires explicit rounding operations to
2371 // implement the UseStrictFP mode.
2372 const bool Matcher::strict_fp_requires_explicit_rounding = false;
2373 
2374 // Are floats converted to double when stored to stack during
2375 // deoptimization?
2376 bool Matcher::float_in_double() { return false; }
2377 
2378 // Do ints take an entire long register or just half?
2379 // The relevant question is how the int is callee-saved:
2380 // the whole long is written but de-opt'ing will have to extract
2381 // the relevant 32 bits.
2382 const bool Matcher::int_in_long = true;
2383 
2384 // Return whether or not this register is ever used as an argument.
2385 // This function is used on startup to build the trampoline stubs in
2386 // generateOptoStub.  Registers not mentioned will be killed by the VM
2387 // call in the trampoline, and arguments in those registers not be
2388 // available to the callee.
2389 bool Matcher::can_be_java_arg(int reg)
2390 {
2391   return
2392     reg ==  R0_num || reg == R0_H_num ||
2393     reg ==  R1_num || reg == R1_H_num ||
2394     reg ==  R2_num || reg == R2_H_num ||
2395     reg ==  R3_num || reg == R3_H_num ||
2396     reg ==  R4_num || reg == R4_H_num ||
2397     reg ==  R5_num || reg == R5_H_num ||
2398     reg ==  R6_num || reg == R6_H_num ||
2399     reg ==  R7_num || reg == R7_H_num ||
2400     reg ==  V0_num || reg == V0_H_num ||
2401     reg ==  V1_num || reg == V1_H_num ||
2402     reg ==  V2_num || reg == V2_H_num ||
2403     reg ==  V3_num || reg == V3_H_num ||
2404     reg ==  V4_num || reg == V4_H_num ||
2405     reg ==  V5_num || reg == V5_H_num ||
2406     reg ==  V6_num || reg == V6_H_num ||
2407     reg ==  V7_num || reg == V7_H_num;
2408 }
2409 
2410 bool Matcher::is_spillable_arg(int reg)
2411 {
2412   return can_be_java_arg(reg);
2413 }
2414 
2415 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
2416   return false;
2417 }
2418 
2419 RegMask Matcher::divI_proj_mask() {
2420   ShouldNotReachHere();
2421   return RegMask();
2422 }
2423 
2424 // Register for MODI projection of divmodI.
2425 RegMask Matcher::modI_proj_mask() {
2426   ShouldNotReachHere();
2427   return RegMask();
2428 }
2429 
2430 // Register for DIVL projection of divmodL.
2431 RegMask Matcher::divL_proj_mask() {
2432   ShouldNotReachHere();
2433   return RegMask();
2434 }
2435 
2436 // Register for MODL projection of divmodL.
2437 RegMask Matcher::modL_proj_mask() {
2438   ShouldNotReachHere();
2439   return RegMask();
2440 }
2441 
2442 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
2443   return FP_REG_mask();
2444 }
2445 
2446 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2447   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2448     Node* u = addp->fast_out(i);
2449     if (u->is_Mem()) {
2450       int opsize = u->as_Mem()->memory_size();
2451       assert(opsize > 0, "unexpected memory operand size");
2452       if (u->as_Mem()->memory_size() != (1<<shift)) {
2453         return false;
2454       }
2455     }
2456   }
2457   return true;
2458 }
2459 
2460 const bool Matcher::convi2l_type_required = false;
2461 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Returns true when the AddP was absorbed into a complex addressing mode
// (inputs pushed for matching); false to compute the offset separately.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base+offset forms are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con) — a scaled index.  Only subsume it if
  // every memory user accesses exactly the scaled size (see
  // size_fits_all_mem_uses) and the shift has no other non-address uses.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // Fold an inner ConvI2L into the addressing mode too (sign-extended
    // index), again only when it has no other uses.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare ConvI2L — an unscaled sign-extended index.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2502 
2503 void Compile::reshape_address(AddPNode* addp) {
2504 }
2505 
2506 
// Emit a volatile access via INSN.  Only plain base-register addressing is
// permitted for these instructions: the guarantees reject any index, scale
// or displacement the matcher might have supplied.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types used by the loadStore() helpers below to
// dispatch to the appropriate MacroAssembler instruction.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2520 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Integer-register variant: emits 'insn' on 'reg' with an address built
  // from base/index/size/disp, choosing sxtw vs lsl extension of the index
  // from the memory operand's AD opcode.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // These operand forms carry a 32-bit index that must be sign extended.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 flags a base+displacement mode with no index register.
    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2551 
  // Float-register variant of loadStore(); same addressing-mode selection
  // as the integer variant, but with fewer sign-extended operand forms.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // 32-bit index operands require sign extension.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 flags a base+displacement mode with no index register.
     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2574 
  // Vector-register variant of loadStore(); the SIMD arrangement T selects
  // the access width.  Index registers are always lsl-scaled here.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    // index == -1 flags a base+displacement mode with no index register.
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2586 
2587 %}
2588 
2589 
2590 
2591 //----------ENCODING BLOCK-----------------------------------------------------
2592 // This block specifies the encoding classes used by the compiler to
2593 // output byte streams.  Encoding classes are parameterized macros
2594 // used by Machine Instruction Nodes in order to generate the bit
2595 // encoding of the instruction.  Operands specify their base encoding
2596 // interface with the interface keyword.  There are currently
2597 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2598 // COND_INTER.  REG_INTER causes an operand to generate a function
2599 // which returns its register number when queried.  CONST_INTER causes
2600 // an operand to generate a function which returns the value of the
2601 // constant when queried.  MEMORY_INTER causes an operand to generate
2602 // four functions which return the Base Register, the Index Register,
2603 // the Scale Value, and the Offset Value of the operand when queried.
2604 // COND_INTER causes an operand to generate six functions which return
2605 // the encoding code (ie - encoding bits for the instruction)
2606 // associated with each basic boolean condition for a conditional
2607 // instruction.
2608 //
2609 // Instructions specify two basic values for encoding.  Again, a
2610 // function is available to check if the constant displacement is an
2611 // oop. They use the ins_encode keyword to specify their encoding
2612 // classes (which must be a sequence of enc_class names, and their
2613 // parameters, specified in the encoding block), and they use the
2614 // opcode keyword to specify, in order, their primary, secondary, and
2615 // tertiary opcode.  Only the opcode sections which a particular
2616 // instruction needs for encoding need to be specified.
2617 encode %{
2618   // Build emit functions for each basic byte or larger field in the
2619   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2620   // from C++ code in the enc_class source block.  Emit functions will
2621   // live in the main source block for now.  In future, we can
2622   // generalize this by adding a syntax that specifies the sizes of
2623   // fields in an order, so that the adlc can build the emit functions
2624   // automagically
2625 
  // catch all for unimplemented encodings: stops the VM with a clear
  // message instead of emitting garbage code.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2631 
2632   // BEGIN Non-volatile memory access
2633 
  // Non-volatile load encodings.  Each one funnels into the loadStore()
  // helper (defined elsewhere in this AD file), which selects the
  // addressing mode from the matched memory operand's components:
  // base register, optional index (-1 when absent) with scale, and
  // displacement.

  // load signed byte, sign-extended into a 32-bit register
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed byte, sign-extended to 64 bits
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned (zero-extended) byte into a 32-bit destination
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned byte into a 64-bit destination (same emission; the
  // enc_class is overloaded on the ideal destination operand type)
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed halfword, sign-extended to 32 bits
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed halfword, sign-extended to 64 bits
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned halfword into a 32-bit destination
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned halfword into a 64-bit destination
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word into a 64-bit destination (zero-extends)
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word, sign-extended to 64 bits
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit doubleword
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load single-precision float into an FP register
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load double-precision float into an FP register
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector loads: the extra MacroAssembler::S/D/Q argument selects the
  // SIMD access size (32-/64-/128-bit respectively)
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2735 
  // Non-volatile store encodings.  Like the loads above, these all go
  // through loadStore(); the *0 variants store the zero register (zr)
  // so no source register operand is needed.

  // store byte
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store constant zero byte (uses zr as the source)
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte preceded by a StoreStore barrier, so that earlier
  // stores are ordered before this one
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store halfword
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store constant zero halfword
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store constant zero word
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit doubleword
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (r31 encodes sp only for a subset of instructions), so copy sp
    // into rscratch2 first and store that instead.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store constant zero doubleword
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store single-precision float
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store double-precision float
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector stores: MacroAssembler::S/D/Q selects 32-/64-/128-bit width
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2828 
2829   // END Non-volatile memory access
2830 
2831   // volatile loads and stores
2832 
  // Volatile store encodings: MOV_VOLATILE (a macro defined earlier in
  // this file) forms the effective address -- using rscratch1 as the
  // scratch register -- and emits the named store-release instruction.
  // NOTE(review): the __ statements in some encodings below rely on
  // MOV_VOLATILE introducing a local _masm into scope; confirm against
  // the macro's definition.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile (load-acquire) loads.  AArch64 has no sign-extending
  // acquire loads, so the signed variants load with ldarb/ldarh and
  // then sign-extend in the destination register.

  // load-acquire signed byte, sign-extended to 32 bits
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire signed byte, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire unsigned byte (32-bit destination)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire unsigned byte (64-bit destination)
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire signed halfword, sign-extended to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire signed halfword, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire unsigned halfword (32-bit destination)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire unsigned halfword (64-bit destination)
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire word (32-bit destination)
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire word into a 64-bit destination
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquire loads of floats: there is no FP load-acquire, so load into
  // the integer scratch register and fmov across to the FP register.

  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // store-release doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (see aarch64_enc_str above), so copy sp through rscratch2 first
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Release stores of floats: fmov the value into rscratch2, then emit
  // an integer store-release.  The fmov is wrapped in its own block so
  // that this _masm goes out of scope before MOV_VOLATILE runs
  // (avoiding a clash with any _masm the macro declares).

  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2957 
2958   // synchronized read/update encodings
2959 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so any index/displacement is first folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale), built in two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // Store-release-exclusive of a 64-bit value.  Mirrors the encoding
  // above but uses rscratch2 for the address because rscratch1 receives
  // the store-exclusive status (0 on success).  The trailing cmpw sets
  // the condition flags so a following branch/cset can test EQ for
  // "store succeeded".
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3018 
  // Compare-and-swap encodings.  All of them require a plain base
  // address (no index, no displacement) and delegate to
  // MacroAssembler::cmpxchg with the access size as the fourth
  // argument.  Result is reported via the condition flags (noreg as the
  // result register); use aarch64_enc_cset_eq below to materialize it.

  // 64-bit CAS, release-only ordering
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS, release-only ordering
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS, release-only ordering
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS, release-only ordering
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // auxiliary used for CompareAndSwapX to set result register:
  // materializes the EQ flag (CAS success) as 1/0 in the result.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3094 
3095   // prefetch encodings
3096 
  // Prefetch for write: emits PRFM with the PSTL1KEEP hint (prefetch
  // for store, L1 cache, temporal/keep policy), decomposing the memory
  // operand just like the load/store encodings above.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // fold the displacement into rscratch1, then add the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3115 
  // mov encodings
3117 
  // move a 32-bit immediate into an integer register; zero goes via zr
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // move a 64-bit immediate into an integer register; zero goes via zr
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // move a pointer constant.  NULL and the value 1 are matched by the
  // dedicated mov_p0/mov_p1 encodings below, hence ShouldNotReachHere.
  // Relocatable constants are emitted with the proper relocation
  // (oop or metadata); plain addresses below the first VM page are
  // emitted as raw immediates, anything larger is materialized
  // page-relative via adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // move the NULL pointer constant (zero)
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move the pointer constant 1 (used as a marker value)
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // load the address of the safepoint polling page with a poll_type
  // relocation; the page is page-aligned, so the adrp offset must be 0
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // load the card table's byte map base (GC write barrier support)
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // move a narrow (compressed) oop constant
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // move the narrow-oop NULL constant (zero)
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move a narrow (compressed) klass constant
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3222 
3223   // arithmetic encodings
3224 
  // 32-bit add/subtract with an immediate.  One encoding serves both
  // AddI and SubI rules: subtract rules set $primary, which negates the
  // constant, and a negative constant is then emitted as the opposite
  // operation on its magnitude.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above (AddL/SubL with immediate)
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // Division and remainder delegate to corrected_idivl/corrected_idivq,
  // which implement the Java corner cases (e.g. MIN_VALUE / -1); the
  // boolean argument selects remainder (true) vs quotient (false).

  // 32-bit signed divide
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit signed divide
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit signed remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit signed remainder
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3284 
3285   // compare instruction encodings
3286 
  // 32-bit register-register compare (sets flags only)
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate.  subsw/addsw
  // with zr as destination discard the result and only set flags;
  // a negative constant is compared by adding its magnitude.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first (constants in add/sub range use the encoding above)
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against an add/sub-range immediate.  val == -val
  // detects the one value whose negation overflows (Long.MIN_VALUE),
  // which must go through a register instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare (full 64-bit)
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow (compressed) oop compare -- 32-bit
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // test a pointer against NULL (compare with zr)
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // test a narrow oop against NULL (32-bit compare with zr)
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3368 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch; $cmp$$cmpcode carries the (signed) condition
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch on an unsigned comparison; emission is identical
  // to br_con -- the unsigned-ness is captured in the cmpOpU operand's
  // condition code
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Slow-path subtype check.  On the fall-through (success) path the
  // result register is zeroed when $primary is set; a failed check
  // branches to 'miss', which is bound immediately after.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3404 
  // Emit a Java static call (or a call to a runtime wrapper when the call
  // node carries no resolved _method).  A trampoline is used so the callee
  // may live anywhere in the code cache.  Records a "CodeCache is full"
  // bailout if either the trampoline or the to-interpreter stub cannot be
  // allocated.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      // Real Java call: pick the relocation matching the call kind.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    // trampoline_call returns NULL when it could not allocate a trampoline.
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3431 
  // Emit a Java dynamic (virtual/interface) call dispatched through an
  // inline cache.  Bails out if the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3441 
  // Code emitted after a call returns.  The stack-depth verification is
  // not implemented on AArch64; call_Unimplemented() stops the VM if the
  // flag is ever enabled here.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3449 
  // Call from compiled Java code into the VM runtime (e.g. arraycopy
  // stubs scheduled by C2 as runtime calls).
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: reachable via trampoline.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: load absolute address and blr.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // by pushing {zr, return-pc} below SP for the duration of the call.
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3476 
  // Jump to the exception rethrow stub.  far_jump is used because the
  // stub need not be within direct branch range.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3481 
  // Plain method return via the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3486 
  // Tail call: transfer control to jump_target with an indirect branch
  // (no link, so the callee returns to our caller).
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3492 
  // Tail jump used when forwarding an exception to the caller's handler.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3502 
  // Fast-path monitor enter.
  //   object -- the oop to lock
  //   box    -- the on-stack BasicLock
  //   tmp / tmp2 -- scratch registers
  // On exit the condition flags encode the outcome:
  //   EQ => lock acquired, NE => caller must take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    // Mask: anything outside the current page, plus the lock bits.
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result

    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3580 
  // Fast-path monitor exit, mirroring aarch64_enc_fast_lock above.
  //   object -- the locked oop
  //   box    -- the on-stack BasicLock used at lock time
  //   tmp / tmp2 -- scratch registers
  // On exit the condition flags encode the outcome:
  //   EQ => unlocked, NE => caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    // Not the owner, or recursions remain: slow path (flags == NE).
    __ br(Assembler::NE, cont);

    // Monitor owned by us with no recursions: check for waiters.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3640 
3641 %}
3642 
3643 //----------FRAME--------------------------------------------------------------
3644 // Definition of frame structure and management information.
3645 //
3646 //  S T A C K   L A Y O U T    Allocators stack-slot number
3647 //                             |   (to get allocators register number
3648 //  G  Owned by    |        |  v    add OptoReg::stack0())
3649 //  r   CALLER     |        |
3650 //  o     |        +--------+      pad to even-align allocators stack-slot
3651 //  w     V        |  pad0  |        numbers; owned by CALLER
3652 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3653 //  h     ^        |   in   |  5
3654 //        |        |  args  |  4   Holes in incoming args owned by SELF
3655 //  |     |        |        |  3
3656 //  |     |        +--------+
3657 //  V     |        | old out|      Empty on Intel, window on Sparc
3658 //        |    old |preserve|      Must be even aligned.
3659 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3660 //        |        |   in   |  3   area for Intel ret address
3661 //     Owned by    |preserve|      Empty on Sparc.
3662 //       SELF      +--------+
3663 //        |        |  pad2  |  2   pad to align old SP
3664 //        |        +--------+  1
3665 //        |        | locks  |  0
3666 //        |        +--------+----> OptoReg::stack0(), even aligned
3667 //        |        |  pad1  | 11   pad to align new SP
3668 //        |        +--------+
3669 //        |        |        | 10
3670 //        |        | spills |  9   spills
3671 //        V        |        |  8   (pad0 slot for callee)
3672 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3673 //        ^        |  out   |  7
3674 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3675 //     Owned by    +--------+
3676 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3677 //        |    new |preserve|      Must be even-aligned.
3678 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3679 //        |        |        |
3680 //
3681 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3682 //         known from SELF's arguments and the Java calling convention.
3683 //         Region 6-7 is determined per call site.
3684 // Note 2: If the calling convention leaves holes in the incoming argument
3685 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3687 //         incoming area, as the Java calling convention is completely under
3688 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3690 //         varargs C calling conventions.
3691 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3692 //         even aligned with pad0 as needed.
3693 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3694 //           (the latter is true on Intel but is it false on AArch64?)
3695 //         region 6-11 is even aligned; it may be padded out more so that
3696 //         the region from SP to FP meets the minimum stack alignment.
3697 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3698 //         alignment.  Region 11, pad1, may be dynamically extended so that
3699 //         SP meets the minimum alignment.
3700 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return location per ideal register type:
    // integral/pointer values in R0, floating point in V0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half: OptoReg::Bad for 32-bit values, the _H register half
    // for 64-bit values (pointers, longs, doubles).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3804 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits);
                                // default is one 4-byte AArch64 instruction
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3822 
3823 //----------OPERANDS-----------------------------------------------------------
3824 // Operand definitions must precede instruction definitions for correct parsing
3825 // in the ADLC because operands constitute user defined types which are used in
3826 // instruction definitions.
3827 
3828 //----------Simple Operands----------------------------------------------------
3829 
// Integer operands 32 bit
// 32 bit immediate -- any int constant
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3873 
// Shift values for add/sub extension shift: 0..4 inclusive
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant less than or equal to 4 (note: no lower bound)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3904 
// Exact-value 32 bit constants (bit positions / shift amounts).

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3984 
// 32 bit constant 255 (0xFF, low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF, low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 255 (0xFF, low-byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xFFFF, low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4034 
// 64 bit constant of the form 2^k - 1 (a non-empty run of low-order
// one bits) with the top two bits clear.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant of the form 2^k - 1 (a non-empty run of low-order
// one bits) with the top two bits clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4058 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4112 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte (word, size shift 2) access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte (doubleword, size shift 3) access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte (quadword, size shift 4) access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte (word, size shift 2) access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte (doubleword, size shift 3) access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte (quadword, size shift 4) access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4193 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4215 
// Integer operands 64 bit
// 64 bit immediate -- any long constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4302 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate -- address of the VM safepoint page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4384 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable in the floating-point move immediate
// (fmov) instruction form.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable in the floating-point move immediate
// (fmov) instruction form.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4445 
// Narrow pointer operands
// Narrow Pointer Immediate (compressed oop constant)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass pointer constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4476 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
// (excludes registers reserved by the VM, e.g. thread/frame registers)
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4498 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4510 
// Integer 64 bit Register not Special
// (excludes registers reserved by the VM, e.g. thread/frame registers)
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // op_cost(0) was missing here; added for consistency with the sibling
  // register operands (iRegL, iRegINoSp), which all declare zero cost.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4520 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
// (excludes registers reserved by the VM, e.g. thread/frame registers)
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4553 
// Pointer operands pinned to a single general register. Encodings and
// calling conventions that require a value in a specific register (R0..R5,
// R10) match one of these instead of the generic iRegP.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4637 
// Long operands pinned to a single general register (see the pinned
// pointer operands above for the rationale).

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4681 
// Pointer 64 bit Register FP only (the frame pointer register)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4692 
// Integer (32 bit) operands pinned to a single general register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4737 
4738 
// Pointer Register Operands
// Narrow Pointer Register (compressed oop held in a 32-bit view)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R0
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R2
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R3
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4777 
// Narrow Pointer Register not Special
// (the original header said "Integer 64 bit" -- this is actually the
// 32-bit narrow-pointer class, cf. no_special_reg32 and RegN)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4787 
// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4798 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4842 
// Double operands pinned to a specific FP/SIMD register (V0..V31), one
// operand per architectural register so match rules can force a value
// into a particular vector register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5130 
5131 // Flags register, used as output of signed compare instructions
5132 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
5135 // that ordered inequality tests use GT, GE, LT or LE none of which
5136 // pass through cases where the result is unordered i.e. one or both
5137 // inputs to the compare is a NaN. this means that the ideal code can
5138 // replace e.g. a GT with an LE and not end up capturing the NaN case
5139 // (where the comparison should always fail). EQ and NE tests are
5140 // always generated in ideal code so that unordered folds into the NE
5141 // case, matching the behaviour of AArch64 NE.
5142 //
5143 // This differs from x86 where the outputs of FP compares use a
5144 // special FP flags registers and where compares based on this
5145 // register are distinguished into ordered inequalities (cmpOpUCF) and
5146 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
5147 // to explicitly handle the unordered case in branches. x86 also has
5148 // to include extra CMoveX rules to accept a cmpOpUCF input.
5149 
// Flags register, used as output of signed compare instructions
// (and of FP compares -- see the note above)
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5170 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously said link_reg -- copy-paste from lr_RegP)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link (return address) Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5212 
//----------Memory Operands----------------------------------------------------

// Plain register-indirect addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}
5228 
// Base + (optionally sign-extended / scaled) index, and base + immediate
// offset addressing modes. The immIOffset*/immLoffset* variants restrict
// the offset so it is valid for the corresponding access size.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  // only usable if every memory use of this AddP tolerates the scaled form
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5398 
// Narrow-oop (DecodeN) addressing modes. These apply only when
// CompressedOops::shift() == 0, i.e. decoding is a no-op and the narrow
// value can be used directly as a base address.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5503 
5504 
5505 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread_reg, #off] address of the last_Java_pc slot
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
5520 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): only stackSlotP declares op_cost(100); the I/F/D/L variants
// rely on the ADLC default -- confirm whether that asymmetry is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5595 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons

// The hex values are the AArch64 4-bit condition-code encodings
// (eq=0000, ne=0001, lt=1011, ge=1010, le=1101, gt=1100, vs=0110, vc=0111).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5632 
// used for unsigned integral comparisons

// Uses the unsigned condition codes (lo/hs/ls/hi) for the ordering tests.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5651 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne tests via the predicate below.
operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5675 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to lt/ge tests via the predicate below.
operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5700 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne/lt/ge tests via the predicate below.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5727 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // added the trailing ';' -- every other operand in this file terminates
  // the interface clause; the omission here was an inconsistency.
  interface(REG_INTER);
%}
5740 
5741 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
5742 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
5743 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5744 
5745 //----------OPERAND CLASSES----------------------------------------------------
5746 // Operand Classes are groups of operands that are used as to simplify
5747 // instruction definitions by not requiring the AD writer to specify
5748 // separate instructions for every form of operand when the
5749 // instruction accepts multiple operand types with the same basic
5750 // encoding and format. The classic case of this is memory operands.
5751 
5752 // memory is used to define read/write location for load/store
5753 // instruction defs. we can turn a memory op into an Address
5754 
// All scalar addressing forms accepted by load/store rules, covering
// both plain (first line) and compressed-oop/narrow (second line,
// "...N" suffixed) base operands.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5772 
5773 //----------PIPELINE-----------------------------------------------------------
5774 // Rules which define the behavior of the target architectures pipeline.
5775 
5776 // For specific pipelines, eg A53, define the stages of that pipeline
5777 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) onto
// the generic six-stage pipe_desc stages S0-S3 declared below, so the
// integer pipe_class definitions can use the familiar names.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5782 
// Pipeline model for a dual-issue in-order core (Cortex-A53-like).
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions: every AArch64 insn is one 32-bit word
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5798 
5799 // We don't use an actual pipeline model so don't care about resources
5800 // or description. we do use pipeline classes to introduce fixed
5801 // latencies
5802 
5803 //----------RESOURCES----------------------------------------------------------
5804 // Resources are the functional units available to the machine
5805 
// Functional units of the modelled core: two issue slots (INS0/INS1,
// INS01 = either slot), two integer ALUs, a multiply-accumulate unit,
// a divider, a branch unit, a load/store unit and the NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5819 
5820 //----------PIPELINE CLASSES---------------------------------------------------
5821 // Pipeline Classes describe the stages in which input and output are
5822 // referenced by the hardware pipeline.
5823 
// Scalar FP pipeline classes. All results are produced in the NEON/FP
// unit; most classes dual-issue in either slot (INS01), while divides
// are restricted to slot 0 (INS0).

// FP dyadic op, single precision: two reads, result in S5.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int conversion (result in a general register).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float conversion (source in a general register).
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float conversion.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double conversion.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double conversion.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision: issue restricted to slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision: issue restricted to slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6025 
// Vector (NEON) pipeline classes. The general pattern: 64-bit (vecD)
// forms may dual-issue in either slot (INS01) while 128-bit (vecX)
// forms are restricted to slot 0 (INS0).

// 64-bit vector multiply.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate: dst is also read (accumulator).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector integer dyadic op (e.g. add/sub).
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector integer dyadic op.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op (and/or/xor).
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by a register shift count.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by a register shift count.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate (shift operand needs no read stage).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector FP dyadic op.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP dyadic op.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide.
// NOTE(review): unlike the other 64-bit classes this one issues in
// INS0 only, matching its 128-bit twin — confirm this is intentional.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP unary op.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP unary op.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate a general register into all lanes, 64-bit result.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes, 128-bit result.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit result.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit result.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into both lanes, 128-bit result.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (movi), 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (movi), 128-bit.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector load.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector store.
// NOTE(review): src is declared vecD although the name and the vmem16
// operand suggest a 128-bit (vecX) store — confirm against users.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6309 
6310 //------- Integer ALU operations --------------------------
6311 
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand is needed a stage earlier
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): comment above says result in EX2 but ALU is claimed in EX1 — confirm which is intended
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6407 
6408 //------- Compare operation -------------------------------
6409 
// Compare reg-reg: writes only the flags register.
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate.
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6434 
6435 //------- Conditional instructions ------------------------
6436 
// Conditional no operands: reads flags only.
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand: selects between two registers on the flags.
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand: second input is implicit (e.g. zr or src).
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6472 
6473 //------- Multiply pipeline operations --------------------
6474 
// Multiply reg-reg (32-bit): executes on the MAC unit.
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32-bit).
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply: as above but with an explicit worst-case latency.
// Eg.  MUL     w0, w1, w2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate.
// Eg.  MADD    w0, w1, w2, w3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6525 
6526 //------- Divide pipeline operations --------------------
6527 
// 32-bit divide: executes on the DIV unit, issue slot 0 only.
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide: twice the worst-case latency of the 32-bit form.
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6551 
6552 //------- Load pipeline operations ------------------------
6553 
// Load - prefetch: no destination register, address read at issue.
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-indexed addressing).
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6585 
6586 //------- Store pipeline operations -----------------------
6587 
// Store - zr, mem: stores the zero register, so only the address reads.
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem: data register is needed later than the address.
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-indexed addressing).
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6619 
//------- Branch pipeline operations ----------------------
6621 
// Unconditional branch: no register operands.
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch: reads the flags register.
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch: reads the tested register directly, no flags.
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6648 
6649 //------- Synchronisation operations ----------------------
6650 
// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized.
// Used by rules that expand to multi-instruction sequences.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class: zero-latency, used for pseudo-ops such as nops.
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
6679 
// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}

%}
6714 //----------INSTRUCTIONS-------------------------------------------------------
6715 //
6716 // match      -- States which machine-independent subtree may be replaced
6717 //               by this instruction.
6718 // ins_cost   -- The estimated cost of this instruction is used by instruction
6719 //               selection to identify a minimum cost tree of machine
6720 //               instructions that matches a tree of machine-independent
6721 //               instructions.
6722 // format     -- A string providing the disassembly for this instruction.
6723 //               The value of an instruction's operand may be inserted
6724 //               by referring to it with a '$' prefix.
6725 // opcode     -- Three instruction opcodes may be provided.  These are referred
6726 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6728 //               indicate the type of machine instruction, while secondary
6729 //               and tertiary are often used for prefix options or addressing
6730 //               modes.
6731 // ins_encode -- A list of encode classes with parameters. The encode class
6732 //               name must have been defined in an 'enc_class' specification
6733 //               in the encode section of the architecture description.
6734 
6735 // ============================================================================
6736 // Memory (Load/Store) Instructions
6737 
6738 // Load Instructions
6739 
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  // Plain loads only; acquiring (volatile) loads are matched elsewhere.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // The load is the ConvI2L's input, hence n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrb zero-extends to 64 bits, so no separate widening is needed.
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6795 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  // Plain loads only; acquiring (volatile) loads are matched elsewhere.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrh zero-extends to 64 bits, so no separate widening is needed.
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6851 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  // Plain loads only; acquiring (volatile) loads are matched elsewhere.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the 32-bit value to 64 bits in one instruction.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  // Matches (long)load & 0xFFFFFFFF; ldrw's implicit zero-extension
  // makes the AndL mask free.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6893 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  // Plain loads only; acquiring (volatile) loads are matched elsewhere.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Annotation corrected: this is a 64-bit long load, not an int load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6907 
// Load Range: array-length loads; never volatile, so no predicate.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  // Plain loads only; acquiring (volatile) loads are matched elsewhere.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer: 32-bit narrow oop, decoded elsewhere.
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6976 
6977 // Load Float
6978 instruct loadF(vRegF dst, memory mem)
6979 %{
6980   match(Set dst (LoadF mem));
6981   predicate(!needs_acquiring_load(n));
6982 
6983   ins_cost(4 * INSN_COST);
6984   format %{ "ldrs  $dst, $mem\t# float" %}
6985 
6986   ins_encode( aarch64_enc_ldrs(dst, mem) );
6987 
6988   ins_pipe(pipe_class_memory);
6989 %}
6990 
6991 // Load Double
6992 instruct loadD(vRegD dst, memory mem)
6993 %{
6994   match(Set dst (LoadD mem));
6995   predicate(!needs_acquiring_load(n));
6996 
6997   ins_cost(4 * INSN_COST);
6998   format %{ "ldrd  $dst, $mem\t# double" %}
6999 
7000   ins_encode( aarch64_enc_ldrd(dst, mem) );
7001 
7002   ins_pipe(pipe_class_memory);
7003 %}
7004 
7005 
// Load Int Constant
// Materialize a 32-bit immediate with a mov/movz/movn sequence.
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 instructions: a full 64-bit pointer may need up to four movk/movz.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7061 
// Load Pointer Constant One
// (used e.g. for the markWord unlocked pattern)
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed copy-paste from loadConP0: this materializes the constant 1, not NULL.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7075 
// Load Poll Page Constant
// Uses a pc-relative adr/adrp, hence the "adr" format.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
// (card table base for the GC write barrier)
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// immFPacked is a float expressible as an 8-bit fmov immediate; note the
// fmovs overload used here takes the value as a double.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load the value from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
7176 
// Load Packed Double Constant
// immDPacked is a double expressible as an 8-bit fmov immediate.
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7189 
// Load Double Constant
// General case: load the value from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed copy-paste from loadConF: annotate the constant as a double.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7206 
// Store Instructions

// Store CMS card-mark Immediate
// Card mark whose preceding StoreStore barrier can be elided.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// Releasing (volatile) stores are matched by the stlr-based rules below.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7253 
7254 
// Store Zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format: aarch64_enc_strb0 stores the zero register ("rscractch2"
  // was a typo and named the wrong register; cf. storeimmC0's "strh  zr").
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7267 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Char/Short
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Integer
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7322 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format annotation: this is a 64-bit long store, not an int store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7336 
// Store Zero Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format annotation: this is a 64-bit long store, not an int store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7350 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer
// When both oop and klass bases are NULL, rheapbase holds zero and can be
// stored directly instead of materializing a zero.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(CompressedOops::base() == NULL &&
            CompressedKlassPointers::base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7438 
// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked
7455 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7469 
//  ---------------- volatile loads and stores ----------------
// These rules use acquire/release (ldar/stlr) instructions and only accept
// a plain register-indirect address, hence the "indirect" operand.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7561 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format to match the encoding: aarch64_enc_ldarsh emits a
  // sign-extending ldarsh, not the zero-extending ldarh shown before.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7574 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends to 64 bits, so the 0xFFFFFFFF mask is folded away.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7600 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format annotation: this is a 64-bit long load, not an int load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7613 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// There is no FP ldar; the encoding loads via an integer register.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7665 
// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7705 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format annotation: this is a 64-bit long store, not an int store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7718 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// There is no FP stlr; the encoding stores via an integer register.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

//  ---------------- end of volatile loads and stores ----------------
7775 
// Data cache line writeback (used by jdk.internal.misc.Unsafe::writebackMemory).
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The indirect operand guarantees a plain base register: no index, no offset.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
7816 
7817 // ============================================================================
7818 // BSWAP Instructions
7819 
7820 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7821   match(Set dst (ReverseBytesI src));
7822 
7823   ins_cost(INSN_COST);
7824   format %{ "revw  $dst, $src" %}
7825 
7826   ins_encode %{
7827     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7828   %}
7829 
7830   ins_pipe(ialu_reg);
7831 %}
7832 
7833 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7834   match(Set dst (ReverseBytesL src));
7835 
7836   ins_cost(INSN_COST);
7837   format %{ "rev  $dst, $src" %}
7838 
7839   ins_encode %{
7840     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7841   %}
7842 
7843   ins_pipe(ialu_reg);
7844 %}
7845 
7846 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7847   match(Set dst (ReverseBytesUS src));
7848 
7849   ins_cost(INSN_COST);
7850   format %{ "rev16w  $dst, $src" %}
7851 
7852   ins_encode %{
7853     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7854   %}
7855 
7856   ins_pipe(ialu_reg);
7857 %}
7858 
7859 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7860   match(Set dst (ReverseBytesS src));
7861 
7862   ins_cost(INSN_COST);
7863   format %{ "rev16w  $dst, $src\n\t"
7864             "sbfmw $dst, $dst, #0, #15" %}
7865 
7866   ins_encode %{
7867     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7868     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7869   %}
7870 
7871   ins_pipe(ialu_reg);
7872 %}
7873 
7874 // ============================================================================
7875 // Zero Count Instructions
7876 
7877 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7878   match(Set dst (CountLeadingZerosI src));
7879 
7880   ins_cost(INSN_COST);
7881   format %{ "clzw  $dst, $src" %}
7882   ins_encode %{
7883     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7884   %}
7885 
7886   ins_pipe(ialu_reg);
7887 %}
7888 
7889 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7890   match(Set dst (CountLeadingZerosL src));
7891 
7892   ins_cost(INSN_COST);
7893   format %{ "clz   $dst, $src" %}
7894   ins_encode %{
7895     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7896   %}
7897 
7898   ins_pipe(ialu_reg);
7899 %}
7900 
7901 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7902   match(Set dst (CountTrailingZerosI src));
7903 
7904   ins_cost(INSN_COST * 2);
7905   format %{ "rbitw  $dst, $src\n\t"
7906             "clzw   $dst, $dst" %}
7907   ins_encode %{
7908     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7909     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7910   %}
7911 
7912   ins_pipe(ialu_reg);
7913 %}
7914 
7915 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7916   match(Set dst (CountTrailingZerosL src));
7917 
7918   ins_cost(INSN_COST * 2);
7919   format %{ "rbit   $dst, $src\n\t"
7920             "clz    $dst, $dst" %}
7921   ins_encode %{
7922     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7923     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7924   %}
7925 
7926   ins_pipe(ialu_reg);
7927 %}
7928 
//---------- Population Count Instructions -------------------------------------
// There is no scalar popcount on AArch64; the value is moved to a SIMD
// register, counted per byte with cnt, and the byte counts summed with addv.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Load + popcount fused: load the int straight into the SIMD register.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Load + popcount fused: load the long straight into the SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8018 
8019 // ============================================================================
8020 // MemBar Instruction
8021 
8022 instruct load_fence() %{
8023   match(LoadFence);
8024   ins_cost(VOLATILE_REF_COST);
8025 
8026   format %{ "load_fence" %}
8027 
8028   ins_encode %{
8029     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8030   %}
8031   ins_pipe(pipe_serial);
8032 %}
8033 
8034 instruct unnecessary_membar_acquire() %{
8035   predicate(unnecessary_acquire(n));
8036   match(MemBarAcquire);
8037   ins_cost(0);
8038 
8039   format %{ "membar_acquire (elided)" %}
8040 
8041   ins_encode %{
8042     __ block_comment("membar_acquire (elided)");
8043   %}
8044 
8045   ins_pipe(pipe_class_empty);
8046 %}
8047 
8048 instruct membar_acquire() %{
8049   match(MemBarAcquire);
8050   ins_cost(VOLATILE_REF_COST);
8051 
8052   format %{ "membar_acquire\n\t"
8053             "dmb ish" %}
8054 
8055   ins_encode %{
8056     __ block_comment("membar_acquire");
8057     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8058   %}
8059 
8060   ins_pipe(pipe_serial);
8061 %}
8062 
8063 
8064 instruct membar_acquire_lock() %{
8065   match(MemBarAcquireLock);
8066   ins_cost(VOLATILE_REF_COST);
8067 
8068   format %{ "membar_acquire_lock (elided)" %}
8069 
8070   ins_encode %{
8071     __ block_comment("membar_acquire_lock (elided)");
8072   %}
8073 
8074   ins_pipe(pipe_serial);
8075 %}
8076 
8077 instruct store_fence() %{
8078   match(StoreFence);
8079   ins_cost(VOLATILE_REF_COST);
8080 
8081   format %{ "store_fence" %}
8082 
8083   ins_encode %{
8084     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8085   %}
8086   ins_pipe(pipe_serial);
8087 %}
8088 
8089 instruct unnecessary_membar_release() %{
8090   predicate(unnecessary_release(n));
8091   match(MemBarRelease);
8092   ins_cost(0);
8093 
8094   format %{ "membar_release (elided)" %}
8095 
8096   ins_encode %{
8097     __ block_comment("membar_release (elided)");
8098   %}
8099   ins_pipe(pipe_serial);
8100 %}
8101 
8102 instruct membar_release() %{
8103   match(MemBarRelease);
8104   ins_cost(VOLATILE_REF_COST);
8105 
8106   format %{ "membar_release\n\t"
8107             "dmb ish" %}
8108 
8109   ins_encode %{
8110     __ block_comment("membar_release");
8111     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8112   %}
8113   ins_pipe(pipe_serial);
8114 %}
8115 
8116 instruct membar_storestore() %{
8117   match(MemBarStoreStore);
8118   ins_cost(VOLATILE_REF_COST);
8119 
8120   format %{ "MEMBAR-store-store" %}
8121 
8122   ins_encode %{
8123     __ membar(Assembler::StoreStore);
8124   %}
8125   ins_pipe(pipe_serial);
8126 %}
8127 
// MemBarReleaseLock: elided — no instructions, only a block comment.
// Presumably the lock-release sequence provides the ordering; confirm
// against the unlocking code.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8140 
// MemBarVolatile elided: when unnecessary_volatile(n) holds, no barrier
// is emitted (zero cost; block comment only). Wins over membar_volatile
// via the predicate.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8154 
// MemBarVolatile: StoreLoad barrier (the expensive one), rendered as
// "dmb ish". Cost is inflated (x100) to strongly discourage selection
// when an elided/alternative rule is available.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8169 
8170 // ============================================================================
8171 // Cast/Convert Instructions
8172 
// CastX2P: reinterpret a long as a pointer. Plain register move,
// skipped entirely when src and dst were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8187 
// CastP2X: reinterpret a pointer as a long. Mirror of castX2P —
// register move elided when allocated to the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8202 
8203 // Convert oop into int for vectors alignment masking
// ConvL2I(CastP2X): truncate a pointer to its low 32 bits via a
// 32-bit register move (movw zero-extends into the w-register).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8215 
8216 // Convert compressed oop into int for vectors alignment masking
8217 // in case of 32bit oops (heap < 4Gb).
// ConvL2I(CastP2X(DecodeN)): with a zero compressed-oop shift the
// narrow oop bit pattern equals the low 32 bits of the decoded
// pointer, so the decode can be skipped and the narrow value moved
// directly with a 32-bit register move.
//
// Fix: the format string previously printed a literal "dst" instead of
// substituting the destination operand with "$dst"; the emitted code is
// unchanged.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8231 
8232 
8233 // Convert oop pointer into compressed form
// EncodeP for possibly-null oops (predicate excludes NotNull types):
// delegates to MacroAssembler::encode_heap_oop. Flags are declared
// killed here, unlike the not_null variant below.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8247 
// EncodeP when the type system proves the oop non-null: cheaper
// MacroAssembler path that can skip the null check.
// NOTE(review): cr is declared as an operand but there is no KILL
// effect (cf. encodeHeapOop above) — confirm encode_heap_oop_not_null
// really leaves the flags untouched, else cr is just unused.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8258 
// DecodeN for possibly-null narrow oops: predicate excludes types
// already known NotNull or Constant (those take the not_null rule).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8272 
// DecodeN when the narrow oop is provably non-null (NotNull or
// Constant type): uses the cheaper not_null decode path.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8286 
8287 // n.b. AArch64 implementations of encode_klass_not_null and
8288 // decode_klass_not_null do not modify the flags register so, unlike
8289 // Intel, we don't kill CR as a side effect here
8290 
// EncodePKlass: compress a klass pointer. Per the note above, the
// AArch64 implementation does not touch the flags, so no KILL cr.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
8305 
// DecodeNKlass: decompress a klass pointer; no flags killed (see note
// above). Selects between the two MacroAssembler overloads: the
// two-register form when src and dst differ, the in-place
// single-register form when the allocator assigned the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8324 
// CheckCastPP is a type-system-only node: zero-size, no code emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8334 
// CastPP is a type-system-only node: zero-size, no code emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8344 
// CastII is a type-system-only node: zero-size, zero-cost, no code.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8355 
8356 // ============================================================================
8357 // Atomic operation instructions
8358 //
8359 // Intel and SPARC both implement Ideal Node LoadPLocked and
8360 // Store{PIL}Conditional instructions using a normal load for the
8361 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8362 //
8363 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8364 // pair to lock object allocations from Eden space when not using
8365 // TLABs.
8366 //
8367 // There does not appear to be a Load{IL}Locked Ideal Node and the
8368 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8369 // and to use StoreIConditional only for 32-bit and StoreLConditional
8370 // only for 64-bit.
8371 //
8372 // We implement LoadPLocked and StorePLocked instructions using,
8373 // respectively the AArch64 hw load-exclusive and store-conditional
8374 // instructions. Whereas we must implement each of
8375 // Store{IL}Conditional using a CAS which employs a pair of
8376 // instructions comprising a load-exclusive followed by a
8377 // store-conditional.
8378 
8379 
8380 // Locked-load (linked load) of the current heap-top
8381 // used when updating the eden heap top
8382 // implemented using ldaxr on AArch64
8383 
// LoadPLocked: linked (exclusive) acquiring load of the heap top,
// paired with storePConditional below; encoded as ldaxr.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8396 
8397 // Conditional-store of the updated heap-top.
8398 // Used during allocation of the shared heap.
8399 // Sets flag (EQ) on success.
8400 // implemented using stlxr on AArch64.
8401 
// StorePConditional: store-conditional (stlxr) of the updated heap
// top; the matching exclusive load is loadPLocked above. Success is
// reported via the flags (EQ), as described in the format string.
// Note oldval is carried by the match rule but not used by the
// encoding — the exclusive monitor from loadPLocked provides the
// old-value check.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8421 
8422 
8423 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8424 // when attempting to rebias a lock towards the current thread.  We
8425 // must use the acquire form of cmpxchg in order to guarantee acquire
8426 // semantics in this case.
// StoreLConditional: implemented as an acquiring 64-bit CAS (see the
// comment above for why acquire semantics are required here); result
// is delivered in the flags (EQ on success).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8442 
8443 // storeIConditional also has acquire semantics, for no better reason
8444 // than matching storeLConditional.  At the time of writing this
8445 // comment storeIConditional was not used anywhere by AArch64.
// StoreIConditional: 32-bit acquiring CAS, mirroring storeLConditional
// (see comment above); flags carry the success result.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8461 
8462 // standard CompareAndSwapX when we are using barriers
8463 // these have higher priority than the rules selected by a predicate
8464 
8465 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8466 // can't match them
8467 
// Strong byte CAS (barrier form): boolean success flag materialized
// into $res via cset. Acquire variant: compareAndSwapBAcq.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8485 
// Strong short CAS (barrier form): success flag -> $res via cset.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8503 
// Strong int CAS (barrier form): success flag -> $res via cset.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8521 
// Strong long CAS (barrier form): success flag -> $res via cset.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8539 
// Strong pointer CAS (barrier form): success flag -> $res via cset.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8557 
// Strong narrow-oop CAS (barrier form): 32-bit cmpxchg; success flag
// -> $res via cset.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8575 
8576 // alternative CompareAndSwapX when we are eliding barriers
8577 
// Byte CAS, acquiring variant: selected (via the predicate and its
// lower cost) when needs_acquiring_load_exclusive(n) allows eliding
// the separate barriers in favour of an acquiring exclusive load.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8596 
// Short CAS, acquiring variant (see compareAndSwapBAcq for selection).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8615 
// Int CAS, acquiring variant (see compareAndSwapBAcq for selection).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8634 
// Long CAS, acquiring variant (see compareAndSwapBAcq for selection).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8653 
// Pointer CAS, acquiring variant (see compareAndSwapBAcq).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8672 
// Narrow-oop CAS, acquiring variant (see compareAndSwapBAcq).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8691 
8692 
8693 // ---------------------------------------------------------------------
8694 
8695 
8696 // BEGIN This section of the file is automatically generated. Do not edit --------------
8697 
8698 // Sundry CAS operations.  Note that release is always true,
8699 // regardless of the memory ordering of the CAS.  This is because we
8700 // need the volatile case to be sequentially consistent but there is
8701 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8702 // can't check the type of memory ordering here, so we always emit a
8703 // STLXR.
8704 
8705 // This section is generated from aarch64_ad_cas.m4
8706 
8707 
8708 
// CompareAndExchangeB (strong; returns the witnessed old value in
// $res, sign-extended from byte). Generated code — fixes belong in
// aarch64_ad_cas.m4.
// NOTE(review): format string says "weak" but the encoding passes
// /*weak*/ false — the text is misleading; correct it in the m4 source.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8724 
// CompareAndExchangeS (strong; old value in $res, sign-extended from
// halfword). Generated code — see aarch64_ad_cas.m4.
// NOTE(review): "weak" in the format string vs /*weak*/ false below.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8740 
// CompareAndExchangeI (strong; old value in $res). Generated code.
// NOTE(review): "weak" in the format string vs /*weak*/ false below.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8755 
// CompareAndExchangeL (strong; old value in $res). Generated code.
// NOTE(review): "weak" in the format string vs /*weak*/ false below.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8770 
// CompareAndExchangeN (strong; old narrow-oop value in $res).
// Generated code.
// NOTE(review): "weak" in the format string vs /*weak*/ false below.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8785 
// CompareAndExchangeP (strong; old pointer value in $res). Generated
// code.
// NOTE(review): "weak" in the format string vs /*weak*/ false below.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8800 
// Acquiring variant of compareAndExchangeB (acquire=true). Generated
// code; selected via needs_acquiring_load_exclusive and lower cost.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8817 
// Acquiring variant of compareAndExchangeS (acquire=true). Generated
// code.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8834 
8835 
// Acquiring variant of compareAndExchangeI (acquire=true). Generated
// code.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8851 
// Acquiring variant of compareAndExchangeL (acquire=true). Generated
// code.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8867 
8868 
// Acquiring variant of compareAndExchangeN (acquire=true). Generated
// code.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8884 
// Acquiring variant of compareAndExchangeP (acquire=true). Generated
// code.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8900 
// WeakCompareAndSwapB: weak CAS (may fail spuriously); boolean result
// materialized into $res via csetw. Generated code.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8917 
// WeakCompareAndSwapS: weak short CAS; boolean result via csetw.
// Generated code.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8934 
// WeakCompareAndSwapI: weak int CAS; boolean result via csetw.
// Generated code.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8951 
// WeakCompareAndSwapL: weak long CAS; boolean (int) result via csetw.
// Generated code.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8968 
// WeakCompareAndSwapN: weak narrow-oop CAS; boolean result via csetw.
// Generated code.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8985 
// WeakCompareAndSwapP: weak pointer CAS; boolean result via csetw.
// Generated code.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9002 
// Acquiring variant of weakCompareAndSwapB (acquire=true); selected
// via needs_acquiring_load_exclusive. Generated code.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9020 
// Acquiring variant of the weak short CAS (halfword operand size).
// NOTE(review): inside the auto-generated region (see END marker below).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9038 
// Acquiring variant of the weak int CAS (word operand size).
// NOTE(review): inside the auto-generated region (see END marker below).
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9056 
// Acquiring variant of the weak long CAS (xword operand size).
// NOTE(review): inside the auto-generated region (see END marker below).
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9074 
// Acquiring variant of the weak narrow-oop CAS (word operand size).
// NOTE(review): inside the auto-generated region (see END marker below).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9092 
// Acquiring variant of the weak pointer CAS (xword operand size).
// NOTE(review): inside the auto-generated region (see END marker below).
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9110 
9111 // END This section of the file is automatically generated. Do not edit --------------
9112 // ---------------------------------------------------------------------
9113 
// Atomic exchange of an int: $prev <-- [$mem]; [$mem] <-- $newv.
// Plain (non-acquiring) variant; see get_and_setIAcq for the acquiring one.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9123 
// Atomic exchange of a long: $prev <-- [$mem]; [$mem] <-- $newv.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9133 
// Atomic exchange of a narrow oop (32-bit): $prev <-- [$mem]; [$mem] <-- $newv.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9143 
// Atomic exchange of a pointer: $prev <-- [$mem]; [$mem] <-- $newv.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9153 
// Acquiring atomic exchange of an int: uses atomic_xchgalw (acquire form);
// preferred over get_and_setI via the predicate and lower cost.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9164 
// Acquiring atomic exchange of a long (atomic_xchgal).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9175 
// Acquiring atomic exchange of a narrow oop (atomic_xchgalw, 32-bit).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9186 
// Acquiring atomic exchange of a pointer (atomic_xchgal, 64-bit).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9197 
9198 
// Atomic fetch-and-add of a long: $newval <-- old [$mem]; [$mem] += $incr.
// Cost +1 biases selection toward the cheaper _no_res rule when possible.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9208 
// Atomic add of a long where the fetched value is unused: the old value is
// discarded (noreg destination).
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9219 
// Atomic fetch-and-add of a long with an add/sub-encodable immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9229 
// Atomic add of a long immediate where the fetched value is unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9240 
// Atomic fetch-and-add of an int (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9250 
// Atomic add of an int where the fetched value is unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9261 
// Atomic fetch-and-add of an int with an add/sub-encodable immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9271 
// Atomic add of an int immediate where the fetched value is unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9282 
// Acquiring atomic fetch-and-add of a long (atomic_addal).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9293 
// Acquiring atomic add of a long where the fetched value is unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9304 
// Acquiring atomic fetch-and-add of a long with an immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9315 
// Acquiring atomic add of a long immediate where the fetched value is unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9326 
// Acquiring atomic fetch-and-add of an int (atomic_addalw).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9337 
// Acquiring atomic add of an int where the fetched value is unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9348 
// Acquiring atomic fetch-and-add of an int with an immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9359 
// Acquiring atomic add of an int immediate where the fetched value is unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9370 
9371 // Manifest a CmpL result in an integer register.
9372 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    // dst = 0 if equal, 1 otherwise ...
    __ csetw($dst$$Register, Assembler::NE);
    // ... then negate the 1 to -1 when src1 < src2.
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9393 
// Manifest a CmpL3 result in an integer register, comparing against an
// add/sub-encodable immediate:
//   dst = (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant cannot be encoded in subs; compare by adding its
    // negation instead (adds src1, -con sets the same flags as src1 - con).
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // dst = 0 if equal, 1 otherwise, then negate to -1 when src1 < src2.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9418 
9419 // ============================================================================
9420 // Conditional Move Instructions
9421 
9422 // n.b. we have identical rules for both a signed compare op (cmpOp)
9423 // and an unsigned compare op (cmpOpU). it would be nice if we could
9424 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9429 // which throws a ShouldNotHappen. So, we have to provide two flavours
9430 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9431 
// Conditional move, int, signed compare: dst = $cmp ? $src2 : $src1
// (cselw's first source, $src2, is selected when the condition holds).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9447 
// Conditional move, int, unsigned compare (cmpOpU flavour of the rule above).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9463 
9464 // special cases where one arg is zero
9465 
9466 // n.b. this is selected in preference to the rule above because it
9467 // avoids loading constant 0 into a source register
9468 
9469 // TODO
9470 // we ought only to be able to cull one of these variants as the ideal
9471 // transforms ought always to order the zero consistently (to left/right?)
9472 
// Conditional move, int, signed, left arg zero: uses zr instead of
// materializing the constant 0 in a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9488 
// Conditional move, int, unsigned, left arg zero.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9504 
// Conditional move, int, signed, right arg zero.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9520 
// Conditional move, int, unsigned, right arg zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9536 
9537 // special case for creating a boolean 0 or 1
9538 
9539 // n.b. this is selected in preference to the rule above because it
9540 // avoids loading constants 0 and 1 into a source register
9541 
// Boolean materialization (signed compare): dst = $cmp ? 0 : 1 via
// csincw dst, zr, zr, cond (csinc selects zr when cond holds, zr+1 otherwise),
// so neither 0 nor 1 needs a source register.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9560 
// Boolean materialization (unsigned compare): dst = $cmp ? 0 : 1.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9579 
// Conditional move, long, signed compare: dst = $cmp ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9595 
// Conditional move, long, unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9611 
9612 // special cases where one arg is zero
9613 
// Conditional move, long, signed, right arg zero (zr avoids loading 0).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9629 
// Conditional move, long, unsigned, right arg zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9645 
// Conditional move, long, signed, left arg zero.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9661 
// Conditional move, long, unsigned, left arg zero.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9677 
// Conditional move, pointer, signed compare: dst = $cmp ? $src2 : $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9693 
// Conditional move, pointer, unsigned compare.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9709 
9710 // special cases where one arg is zero
9711 
// Conditional move, pointer, signed, right arg null (zr avoids loading 0).
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9727 
// Conditional move, pointer, unsigned, right arg null.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9743 
// Conditional move, pointer, signed, left arg null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9759 
// Conditional move, pointer, unsigned, left arg null.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9775 
// Conditional move, narrow oop (32-bit), signed compare.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9791 
// Conditional move, narrow oop (32-bit), unsigned compare.
// Fix: the format comment previously said "signed, compressed ptr", which
// contradicted the cmpOpU/rFlagsRegU operands and the sibling cmovU* rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9807 
9808 // special cases where one arg is zero
9809 
// Conditional move, narrow oop, signed, right arg null (zr avoids loading 0).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9825 
// Conditional move, narrow oop, unsigned, right arg null.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9841 
// Conditional move, narrow oop, signed, left arg null.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9857 
// Conditional move, narrow oop, unsigned, left arg null.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9873 
// Conditional move, float, signed compare: dst = $cmp ? $src2 : $src1
// (fcsels selects its first source, $src2, when the condition holds).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9891 
9892 instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
9893 %{
9894   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
9895 
9896   ins_cost(INSN_COST * 3);
9897 
9898   format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
9899   ins_encode %{
9900     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9901     __ fcsels(as_FloatRegister($dst$$reg),
9902               as_FloatRegister($src2$$reg),
9903               as_FloatRegister($src1$$reg),
9904               cond);
9905   %}
9906 
9907   ins_pipe(fp_cond_reg_reg_s);
9908 %}
9909 
// Conditional move of double-precision floats via FCSEL. The encoder
// deliberately passes src2 before src1 (FCSEL selects its first source when
// the condition holds) — preserve this operand order.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed copy-paste in the debug format: this is a double cmove, not float.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9927 
// Unsigned-compare conditional move of doubles (cmpOpU/rFlagsRegU); src2 and
// src1 are deliberately swapped in the encoder — FCSEL selects its first
// source when the condition holds.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed copy-paste in the debug format: this is a double cmove, not float.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9945 
9946 // ============================================================================
9947 // Arithmetic Instructions
9948 //
9949 
9950 // Integer Addition
9951 
9952 // TODO
9953 // these currently employ operations which do not set CR and hence are
9954 // not flagged as killing CR but we would like to isolate the cases
9955 // where we want to set flags from those where we don't. need to work
9956 // out how to do that.
9957 
// 32-bit integer addition: register-register and register-immediate forms.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// AddI of a ConvL2I source matched directly: addw reads the W (low 32-bit)
// view of the long register, so no separate narrowing instruction is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10000 
10001 // Pointer Addition
// Pointer + long offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset, folded into add's sxtw extend form.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long << scale), emitted as a scaled-index lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + ((long)int << scale): lea with a sign-extended, scaled index.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10061 
// (long)(int src) << scale as a single sbfiz: the shift amount becomes the
// insertion position and the field width is capped at 32 because only 32
// source bits are significant ((-scale) & 63 == 64 - scale for scale != 0).
// NOTE(review): cr is declared but no KILL effect is specified — presumably
// related to the flag-setting TODO earlier in this section; confirm.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10076 
10077 // Pointer Immediate Addition
10078 // n.b. this needs to be more expensive than using an indirect memory
10079 // operand
// Pointer + immediate, through the shared add/sub immediate encoder.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10093 
10094 // Long Addition
// 64-bit register-register addition.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10110 
// Long Immediate Addition. No constant pool entries required.
// 64-bit addition of an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10125 
10126 // Integer Subtraction
// 32-bit integer subtraction: register-register and register-immediate forms.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10156 
10157 // Long Subtraction
// 64-bit register-register subtraction.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10173 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtraction of an add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed debug format: "sub$dst" lacked the separator; spacing now matches
  // the register-register form above ("sub  $dst, ...").
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10188 
10189 // Integer Negation (special case for sub)
10190 
// Negation matched from (0 - x). NOTE(review): cr is declared but no KILL
// effect is specified — presumably tied to the flag-setting TODO above;
// confirm before relying on flags being preserved across these.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10220 
10221 // Integer Multiply
10222 
// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64 signed widening multiply: MulL of two ConvI2L inputs collapses
// to a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10252 
10253 // Long Multiply
10254 
// 64-bit multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10269 
// High 64 bits of a signed 64x64 multiply (smulh).
// NOTE(review): cr is declared but no KILL effect is specified — confirm
// whether flags are really clobbered here.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed debug format: removed the stray ", " before the comment tab.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10285 
10286 // Combined Integer Multiply & Add/Sub
10287 
// 32-bit multiply-add: dst = src3 + src1*src2, single maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed debug format: the encoder emits the 32-bit form (maddw), not madd.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// 32-bit multiply-subtract: dst = src3 - src1*src2, single msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed debug format: the encoder emits the 32-bit form (msubw), not msub.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10319 
10320 // Combined Integer Multiply & Neg
10321 
// 32-bit multiply-negate: dst = -(src1*src2); both commuted match rules are
// listed so either operand may carry the (0 - x) sub-expression.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Fixed debug format: the encoder emits the 32-bit form (mnegw), not mneg.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10337 
10338 // Combined Long Multiply & Add/Sub
10339 
// 64-bit multiply-add: dst = src3 + src1*src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// 64-bit multiply-subtract: dst = src3 - src1*src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// 64-bit multiply-negate: dst = -(src1*src2); both commuted match rules are
// listed so either operand may carry the (0 - x) sub-expression.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10389 
10390 // Integer Divide
10391 
// 32-bit signed division via the shared sdivw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 extracts the sign bit (0 or 1); equivalent to a single
// logical shift right by 31. Used as the rounding correction in signed
// divide-by-power-of-two sequences.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit(src), folded into one addw with an LSR #31 shifted operand;
// the add-the-sign-bit correction used when rounding a signed divide by 2.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10425 
10426 // Long Divide
10427 
// 64-bit signed division via the shared sdiv encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// (x >> 63) >>> 63 extracts the sign bit — 64-bit twin of signExtract.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10447 
// src + sign-bit(src) — 64-bit twin of div2Round, one add with LSR #63.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed debug format: it omitted the "LSR" shift kind that the 32-bit
  // variant (div2Round) prints for the same shifted-register operand.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10461 
10462 // Integer Remainder
10463 
// 32-bit signed remainder: sdivw into rscratch1, then msubw to recover
// the remainder (dst = src1 - rscratch1*src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed garbled debug format: "msubw($dst, ..." had a stray '(' and no
  // closing text; it now reads as the two instructions actually emitted.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10474 
10475 // Long Remainder
10476 
// 64-bit signed remainder: sdiv into rscratch1, then msub to recover
// the remainder (dst = src1 - rscratch1*src2).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed garbled debug format: "msub($dst, ..." had a stray '(' and the
  // first line lacked the continuation tab used by the modI twin.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10487 
10488 // Integer Shifts
10489 
10490 // Shift Left Register
// 32-bit shifts. Variable-shift forms use the lslvw/lsrvw/asrvw register
// variants; immediate forms mask the constant with 0x1f, matching the
// JLS/AArch64 shift-amount semantics for 32-bit operands.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10585 
10586 // Combined Int Mask and Right Shift (using UBFM)
10587 // TODO
10588 
10589 // Long Shifts
10590 
10591 // Shift Left Register
// 64-bit shifts. Variable-shift forms use lslv/lsrv/asrv; immediate forms
// mask the constant with 0x3f for 64-bit operands.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores: logical right shift of a
// pointer reinterpreted as a long (CastP2X).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10702 
10703 // BEGIN This section of the file is automatically generated. Do not edit --------------
10704 
// NOTE(review): generated section (see BEGIN marker above) — any code change
// must be made in the generator, not hand-edited here.
// Bitwise NOT matched from (x ^ -1): eon with zr computes ~x.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit twin of regL_not_reg (eonw).
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10737 
// NOTE(review): generated section (see BEGIN marker above) — any code change
// must be made in the generator, not hand-edited here.
// x & ~y -> bic / bicw; x | ~y -> orn / ornw. The (y ^ -1) sub-expression
// in the match rule is the NOT.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10805 
// NOTE(review): generated section (see BEGIN marker above) — any code change
// must be made in the generator, not hand-edited here.
// -1 ^ (y ^ x) == x ^ ~y -> eon / eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10839 
// NOTE(review): generated section (see BEGIN marker above) — any code change
// must be made in the generator, not hand-edited here.
// x & ~(y >>/>>>/<< k) folded into a single bic/bicw with a shifted second
// operand (LSR/ASR/LSL variants for both 32- and 64-bit widths).
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10947 
// Fold "~(src1 ^ (src2 <shift> src3))" into a single EON
// (exclusive-OR-NOT) instruction.  The match tree is
// (Xor src4 (Xor (<shift> src2 src3) src1)) with src4 == -1
// (immI_M1 / immL_M1), i.e. the result of the inner XOR is inverted.
// Shift amounts are masked to 0x1f (32-bit eonw) or 0x3f (64-bit eon).

// dst = ~(src1 ^ (src2 >>> src3))   (32-bit, logical shift right)
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3))   (64-bit, logical shift right)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))    (32-bit, arithmetic shift right)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))    (64-bit, arithmetic shift right)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))    (32-bit, shift left)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))    (64-bit, shift left)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11055 
// Fold "src1 | ~(src2 <shift> src3)" into a single ORN (OR-NOT)
// instruction.  As with the BIC rules above, the bitwise NOT is matched
// as an XOR with the all-ones immediate (operand src4: immI_M1 /
// immL_M1).  Shift amounts are masked to 0x1f (32-bit ornw) or 0x3f
// (64-bit orn).

// dst = src1 | ~(src2 >>> src3)   (32-bit, logical shift right)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3)   (64-bit, logical shift right)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)    (32-bit, arithmetic shift right)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)    (64-bit, arithmetic shift right)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)    (32-bit, shift left)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)    (64-bit, shift left)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11163 
// AND with a shifted register operand: dst = src1 & (src2 <shift> src3),
// using the AArch64 shifted-register form of AND so the shift costs no
// extra instruction.  The assembler method for the 64-bit AND is named
// "andr" ("and" is a C++ alternative operator token).  Shift amounts are
// masked to 0x1f (32-bit) / 0x3f (64-bit).

// dst = src1 & (src2 >>> src3)   (32-bit)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3)   (64-bit)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)    (32-bit)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)    (64-bit)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)    (32-bit)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)    (64-bit)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11277 
// XOR with a shifted register operand: dst = src1 ^ (src2 <shift> src3),
// using the shifted-register form of EOR.  Shift amounts are masked to
// 0x1f (32-bit eorw) / 0x3f (64-bit eor).

// dst = src1 ^ (src2 >>> src3)   (32-bit)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3)   (64-bit)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)    (32-bit)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)    (64-bit)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)    (32-bit)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)    (64-bit)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11391 
// OR with a shifted register operand: dst = src1 | (src2 <shift> src3),
// using the shifted-register form of ORR.  Shift amounts are masked to
// 0x1f (32-bit orrw) / 0x3f (64-bit orr).

// dst = src1 | (src2 >>> src3)   (32-bit)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3)   (64-bit)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3)    (32-bit)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3)    (64-bit)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3)    (32-bit)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3)    (64-bit)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11505 
// ADD with a shifted register operand: dst = src1 + (src2 <shift> src3),
// using the shifted-register form of ADD.  Shift amounts are masked to
// 0x1f (32-bit addw) / 0x3f (64-bit add).

// dst = src1 + (src2 >>> src3)   (32-bit)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3)   (64-bit)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3)    (32-bit)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3)    (64-bit)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3)    (32-bit)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3)    (64-bit)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11619 
// SUB with a shifted register operand: dst = src1 - (src2 <shift> src3),
// using the shifted-register form of SUB.  Shift amounts are masked to
// 0x1f (32-bit subw) / 0x3f (64-bit sub).

// dst = src1 - (src2 >>> src3)   (32-bit)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3)   (64-bit)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)    (32-bit)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)    (64-bit)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)    (32-bit)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)    (64-bit)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11733 
11734 
11735 
11736 // Shift Left followed by Shift Right.
11737 // This idiom is used by the compiler for the i2b bytecode etc.
// Shift Left followed by Shift Right folded into one bitfield move:
// SBFM for the signed (arithmetic) right shift, UBFM for the unsigned
// (logical) right shift.  For a left shift by L and a right shift by R
// on a W-bit value, the equivalent bitfield move takes
//   immr (r) = (R - L) mod W   and   imms (s) = W - 1 - L,
// which is exactly what the encodings below compute (W = 64 or 32).
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to the 64-bit range, matching Java shift
    // semantics for longs.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// 32-bit variant: (src << L) >> R  ==>  sbfmw (sign-extending).
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to the 32-bit range, matching Java shift
    // semantics for ints.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// 64-bit variant: (src << L) >>> R  ==>  ubfm (zero-extending).
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// 32-bit variant: (src << L) >>> R  ==>  ubfmw (zero-extending).
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// Matches "(src >>> rshift) & mask" where mask is 2^k - 1 (guaranteed by
// immI_bitmask) and emits a single UBFXW (unsigned bitfield extract).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // i.e. the extracted field (rshift + width bits) must fit in 32 bits.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // Field width in bits; mask is 2^width - 1.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// Matches "(src >>> rshift) & mask" on longs where mask is 2^k - 1
// (guaranteed by immL_bitmask) and emits a single UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  // i.e. the extracted field (rshift + width bits) must fit in 64 bits.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    // Field width in bits; mask is 2^width - 1.
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11851 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zeroes the upper bits, so the ConvI2L comes for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  // i.e. the extracted field (rshift + width bits) must fit in 32 bits.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // Field width in bits; mask is 2^width - 1.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11871 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// Matches "(src & mask) << lshift" and emits a single UBFIZW
// (unsigned bitfield insert in zero).
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  // The inserted field (lshift + width bits) must fit in 32 bits.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    // Field width in bits; mask is 2^width - 1.
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// Long flavour of ubfizwI above: "(src & mask) << lshift" ==> UBFIZ.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  // The inserted field (lshift + width bits) must fit in 64 bits.
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    // Field width in bits; mask is 2^width - 1.
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11908 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (the 64-bit ubfiz zeroes the upper bits, so the ConvI2L comes for free).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  // The inserted field (lshift + width bits) must fit in 64 bits.
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    // Field width in bits; mask is 2^width - 1.
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11926 
11927 // Rotations
11928 
// Matches "(src1 << lshift) | (src2 >>> rshift)" when lshift + rshift
// is a multiple of 64 (see predicate) and emits a single EXTR.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11943 
// Matches "(src1 << lshift) | (src2 >>> rshift)" when lshift + rshift
// is a multiple of 32 (see predicate) and emits a single EXTRW.
// NOTE(review): this block is in the auto-generated section; the
// canonical home for this format fix is the aarch64_ad.m4 generator.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Was "extr": print the 32-bit mnemonic actually emitted below.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11958 
// Matches "(src1 << lshift) + (src2 >>> rshift)" when lshift + rshift
// is a multiple of 64; add of disjoint halves is equivalent to or, so a
// single EXTR suffices.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11973 
// Matches "(src1 << lshift) + (src2 >>> rshift)" when lshift + rshift
// is a multiple of 32; add of disjoint halves is equivalent to or, so a
// single EXTRW suffices.
// NOTE(review): this block is in the auto-generated section; the
// canonical home for this format fix is the aarch64_ad.m4 generator.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Was "extr": print the 32-bit mnemonic actually emitted below.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11988 
11989 
// rol expander
// AArch64 has no rotate-left; implement rol(x, s) as ror(x, -s).
// Clobbers rscratch1 (holds the negated shift count).

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12005 
// rol expander
// 32-bit flavour of rolL_rReg: rol(x, s) == rorw(x, -s).
// Clobbers rscratch1 (holds the negated shift count).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12021 
// Match the long rotate-left idiom "(x << s) | (x >>> (64 - s))".
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12030 
// Same rotate-left idiom written with "0 - s"; long shifts only use the
// low 6 bits of the count, so (0 - s) and (64 - s) shift identically.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12039 
// Match the int rotate-left idiom "(x << s) | (x >>> (32 - s))".
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12048 
// Same rotate-left idiom written with "0 - s"; int shifts only use the
// low 5 bits of the count, so (0 - s) and (32 - s) shift identically.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12057 
// ror expander
// Rotate-right maps directly onto RORV; no temp register needed.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12072 
// ror expander
// 32-bit flavour: rotate-right maps directly onto RORVW.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12087 
// Match the long rotate-right idiom "(x >>> s) | (x << (64 - s))".
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12096 
// Same rotate-right idiom written with "0 - s"; long shifts only use the
// low 6 bits of the count, so (0 - s) and (64 - s) shift identically.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12105 
// Match the int rotate-right idiom "(x >>> s) | (x << (32 - s))".
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12114 
// Same rotate-right idiom written with "0 - s"; int shifts only use the
// low 5 bits of the count, so (0 - s) and (32 - s) shift identically.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12123 
// Add/subtract (extended)
// These fold a sign/zero extension of one operand into the add/sub
// instruction's extended-register form, saving a separate extend insn.

// "src1 + (long)src2" ==> add with an sxtw-extended second operand.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12138 
// "src1 - (long)src2" ==> sub with an sxtw-extended second operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12151 
12152 
// "src1 + ((src2 << 16) >> 16)" (sign-extend short) ==> add ..., sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12165 
// "src1 + ((src2 << 24) >> 24)" (sign-extend byte) ==> add ..., sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12178 
// "src1 + ((src2 << 24) >>> 24)" (zero-extend byte) ==> add ..., uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12191 
// Long: "src1 + ((src2 << 48) >> 48)" (sign-extend short) ==> add ..., sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12204 
// Long: "src1 + ((src2 << 32) >> 32)" (sign-extend int) ==> add ..., sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12217 
// Long: "src1 + ((src2 << 56) >> 56)" (sign-extend byte) ==> add ..., sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12230 
// Long: "src1 + ((src2 << 56) >>> 56)" (zero-extend byte) ==> add ..., uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12243 
12244 
// "src1 + (src2 & 0xff)" ==> addw with a uxtb-extended second operand.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12257 
// "src1 + (src2 & 0xffff)" ==> addw with a uxth-extended second operand.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12270 
// Long: "src1 + (src2 & 0xff)" ==> add with a uxtb-extended second operand.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12283 
// Long: "src1 + (src2 & 0xffff)" ==> add with a uxth-extended second operand.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12296 
// Long: "src1 + (src2 & 0xffffffff)" ==> add with a uxtw-extended operand.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12309 
// "src1 - (src2 & 0xff)" ==> subw with a uxtb-extended second operand.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12322 
// "src1 - (src2 & 0xffff)" ==> subw with a uxth-extended second operand.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12335 
// Long: "src1 - (src2 & 0xff)" ==> sub with a uxtb-extended second operand.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12348 
// Long: "src1 - (src2 & 0xffff)" ==> sub with a uxth-extended second operand.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12361 
// Long: "src1 - (src2 & 0xffffffff)" ==> sub with a uxtw-extended operand.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12374 
12375 
// "src1 + (((src2 << 56) >> 56) << lshift2)": fuse the byte
// sign-extension and shift into add's extended-register operand.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$', so the literal text was
  // printed instead of the shift amount (fix also belongs in aarch64_ad.m4).
  format %{ "add  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12388 
// "src1 + (((src2 << 48) >> 48) << lshift2)": fuse the short
// sign-extension and shift into add's extended-register operand.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "add  $dst, $src1, $src2, sxth #$lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12401 
// "src1 + (((src2 << 32) >> 32) << lshift2)": fuse the int
// sign-extension and shift into add's extended-register operand.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "add  $dst, $src1, $src2, sxtw #$lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12414 
// "src1 - (((src2 << 56) >> 56) << lshift2)": fuse the byte
// sign-extension and shift into sub's extended-register operand.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12427 
// "src1 - (((src2 << 48) >> 48) << lshift2)": fuse the short
// sign-extension and shift into sub's extended-register operand.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, sxth #$lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12440 
// "src1 - (((src2 << 32) >> 32) << lshift2)": fuse the int
// sign-extension and shift into sub's extended-register operand.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, sxtw #$lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12453 
// Int: "src1 + (((src2 << 24) >> 24) << lshift2)": fuse the byte
// sign-extension and shift into addw's extended-register operand.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "addw  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12466 
// Int: "src1 + (((src2 << 16) >> 16) << lshift2)": fuse the short
// sign-extension and shift into addw's extended-register operand.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "addw  $dst, $src1, $src2, sxth #$lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12479 
// Int: "src1 - (((src2 << 24) >> 24) << lshift2)": fuse the byte
// sign-extension and shift into subw's extended-register operand.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "subw  $dst, $src1, $src2, sxtb #$lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12492 
// Int: "src1 - (((src2 << 16) >> 16) << lshift2)": fuse the short
// sign-extension and shift into subw's extended-register operand.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift2" was missing the '$' (printed literal text).
  format %{ "subw  $dst, $src1, $src2, sxth #$lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12505 
12506 
// "src1 + ((long)src2 << lshift)": fuse the sxtw and shift into add's
// extended-register operand.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "add  $dst, $src1, $src2, sxtw #$lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12519 
// "src1 - ((long)src2 << lshift)": fuse the sxtw and shift into sub's
// extended-register operand.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, sxtw #$lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12532 
12533 
// "src1 + ((src2 & 0xff) << lshift)": fuse the zero-extension and shift
// into add's extended-register operand.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "add  $dst, $src1, $src2, uxtb #$lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12546 
// "src1 + ((src2 & 0xffff) << lshift)": fuse the zero-extension and shift
// into add's extended-register operand.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "add  $dst, $src1, $src2, uxth #$lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12559 
// "src1 + ((src2 & 0xffffffff) << lshift)": fuse the zero-extension and
// shift into add's extended-register operand.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "add  $dst, $src1, $src2, uxtw #$lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12572 
// "src1 - ((src2 & 0xff) << lshift)": fuse the zero-extension and shift
// into sub's extended-register operand.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, uxtb #$lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12585 
// "src1 - ((src2 & 0xffff) << lshift)": fuse the zero-extension and shift
// into sub's extended-register operand.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, uxth #$lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12598 
// "src1 - ((src2 & 0xffffffff) << lshift)": fuse the zero-extension and
// shift into sub's extended-register operand.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "sub  $dst, $src1, $src2, uxtw #$lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12611 
// Int: "src1 + ((src2 & 0xff) << lshift)": fuse the zero-extension and
// shift into addw's extended-register operand.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "addw  $dst, $src1, $src2, uxtb #$lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12624 
// Int: "src1 + ((src2 & 0xffff) << lshift)": fuse the zero-extension and
// shift into addw's extended-register operand.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "addw  $dst, $src1, $src2, uxth #$lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12637 
// Int: "src1 - ((src2 & 0xff) << lshift)": fuse the zero-extension and
// shift into subw's extended-register operand.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  // Fixed format: "#lshift" was missing the '$' (printed literal text).
  format %{ "subw  $dst, $src1, $src2, uxtb #$lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12650 
12651 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12652 %{
12653   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12654   ins_cost(1.9 * INSN_COST);
12655   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
12656 
12657    ins_encode %{
12658      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12659             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12660    %}
12661   ins_pipe(ialu_reg_reg_shift);
12662 %}
12663 // END This section of the file is automatically generated. Do not edit --------------
12664 
12665 // ============================================================================
12666 // Floating Point Arithmetic Instructions
12667 
// Scalar FP add/sub/mul, register-register forms. Each rule maps the ideal
// node (AddF/AddD/SubF/SubD/MulF/MulD) onto the corresponding A64 scalar
// instruction (fadds/faddd/fsubs/fsubd/fmuls/fmuld).

// float add: fadds dst, src1, src2
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double add: faddd dst, src1, src2
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float subtract: fsubs dst, src1, src2
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double subtract: fsubd dst, src1, src2
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float multiply: fmuls dst, src1, src2 (slightly higher cost than add/sub)
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double multiply: fmuld dst, src1, src2
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12757 
12758 // src1 * src2 + src3
// Fused multiply-add family. All rules are guarded by UseFMA and map the
// FmaF/FmaD ideal node (argument order: Fma accum (Binary mul1 mul2)) onto
// the A64 scalar fused instructions. The leading comment on each rule gives
// the value computed in terms of src1/src2/src3.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand is not referenced by the match rule or
// the encoding -- it looks vestigial; confirm and consider removing it.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): 'zero' is unused here as well (see mnsubF_reg_reg above).
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12898 
12899 
// Math.max(FF)F
// Maps MaxF directly onto the scalar fmaxs instruction.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12955 
12956 
12957 instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12958   match(Set dst (DivF src1  src2));
12959 
12960   ins_cost(INSN_COST * 18);
12961   format %{ "fdivs   $dst, $src1, $src2" %}
12962 
12963   ins_encode %{
12964     __ fdivs(as_FloatRegister($dst$$reg),
12965              as_FloatRegister($src1$$reg),
12966              as_FloatRegister($src2$$reg));
12967   %}
12968 
12969   ins_pipe(fp_div_s);
12970 %}
12971 
12972 instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12973   match(Set dst (DivD src1  src2));
12974 
12975   ins_cost(INSN_COST * 32);
12976   format %{ "fdivd   $dst, $src1, $src2" %}
12977 
12978   ins_encode %{
12979     __ fdivd(as_FloatRegister($dst$$reg),
12980              as_FloatRegister($src1$$reg),
12981              as_FloatRegister($src2$$reg));
12982   %}
12983 
12984   ins_pipe(fp_div_d);
12985 %}
12986 
// float negate: fnegs dst, src
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously printed "fneg"; the encoding emits fnegs, and
  // the sibling negD_reg_reg prints the full mnemonic "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13000 
// double negate: fnegd dst, src
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// float absolute value: fabss dst, src
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// double absolute value: fabsd dst, src
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13040 
// double square root: fsqrtd dst, src
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: this double-precision op was scheduled on the single-precision
  // divide pipe (fp_div_s) while sqrtF_reg used fp_div_d -- the two pipe
  // classes were swapped. Scheduling-only change; encoding is unaffected.
  ins_pipe(fp_div_d);
%}
13053 
// float square root. C2 represents Math.sqrt on a float as
// (double)sqrt((double)f) rounded back to float; a single fsqrts gives the
// same result, so the whole conversion chain is matched here.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: this single-precision op was scheduled on the double-precision
  // divide pipe (fp_div_d) while sqrtD_reg used fp_div_s -- the two pipe
  // classes were swapped. Scheduling-only change; encoding is unaffected.
  ins_pipe(fp_div_s);
%}
13066 
13067 // ============================================================================
13068 // Logical Instructions
13069 
13070 // Integer Logical Instructions
13071 
13072 // And Instructions
13073 
13074 
// int bitwise AND, register-register: andw dst, src1, src2
// NOTE(review): 'cr' is declared but not referenced by the match rule,
// encoding, or an effect() clause -- looks unnecessary; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13089 
// int bitwise AND with logical immediate: andw dst, src1, #imm
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format previously said "andsw" (the flag-setting variant) but
  // the encoding emits plain andw, which does not set flags.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13104 
// Or Instructions

// int bitwise OR, register-register: orrw dst, src1, src2
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int bitwise OR with logical immediate: orrw dst, src1, #imm
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int bitwise XOR, register-register: eorw dst, src1, src2
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int bitwise XOR with logical immediate: eorw dst, src1, #imm
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13168 
13169 // Long Logical Instructions
13170 // TODO
13171 
// long bitwise AND, register-register: and dst, src1, src2
// NOTE(review): 'cr' is declared but unreferenced here and in
// andL_reg_imm -- looks unnecessary; confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: all six long-logical formats in this group said "# int";
  // they operate on 64-bit values, so print "# long".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise AND with logical immediate: and dst, src1, #imm
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// long bitwise OR, register-register: orr dst, src1, src2
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise OR with logical immediate: orr dst, src1, #imm
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// long bitwise XOR, register-register: eor dst, src1, src2
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise XOR with logical immediate: eor dst, src1, #imm
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13265 
// int -> long: sign-extend via sbfm (sxtw alias form).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// unsigned int -> long: (ConvI2L src) & 0xFFFFFFFF folds to a single
// zero-extend via ubfm (uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int: a 32-bit register move truncates to the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13304 
// int -> boolean (Conv2B): dst = (src != 0) ? 1 : 0, via compare against
// zr then conditional set. Clobbers the flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: same cmp/cset sequence, 64-bit compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13340 
// FP <-> FP and FP <-> integer conversions. fcvt* converts between FP
// precisions, fcvtz* converts FP to signed integer (round toward zero),
// scvtf* converts signed integer to FP. The trailing 'w'/absence of 'w'
// in the macro name selects 32- vs 64-bit integer width.

instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13470 
13471 // stack <-> reg and reg <-> reg shuffles with no conversion
13472 
// Raw-bit moves between register files via a stack slot: load the bits of
// a spilled value into the other register file without any conversion.

instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Register -> stack-slot direction of the same raw-bit moves.

instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13580 
// Store a double's raw bits to a long stack slot (no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format operands were swapped ("strd $dst, $src"); the encoding
  // stores $src to $dst, matching the sibling *_reg_stack formats.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13598 
// Store a long's raw bits to a double stack slot (no conversion).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13616 
// Direct register-to-register raw-bit moves between the FP/SIMD and
// general-purpose register files, using fmov (no memory round trip).

instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13688 
13689 // ============================================================================
13690 // clearing of an array
13691 
// Zero an array: cnt words starting at base, via the zero_words stub.
// cnt and base are pinned to r11/r10 and clobbered (USE_KILL) because the
// stub consumes them.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-length variant: only used when the count is small enough that
// inline zeroing beats the block-zeroing (DC ZVA) path.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13723 
13724 // ============================================================================
13725 // Overflow Math Instructions
13726 
// Overflow check for int add: CMN (compare negative) computes op1 + op2,
// discarding the result but setting NZCV. The consuming branch/cmov tests
// the V flag (BoolTest::overflow / no_overflow).
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Same as above with an add/sub-encodable immediate as the second operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add: 64-bit CMN sets flags for op1 + op2.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check with an add/sub-encodable immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13778 
// Overflow check for int subtract: CMP computes op1 - op2 and sets NZCV;
// the consumer tests the V flag.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract: 64-bit CMP sets flags for op1 - op2.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check with immediate. Emitted as an explicit
// SUBS with zr destination, which is exactly what CMP-with-immediate
// aliases to; flags are set identically to the register form above.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13830 
// Overflow check for int negate, matched as (OverflowSubI 0 op1):
// cmpw zr, op1 sets V when op1 == Integer.MIN_VALUE.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negate, matched as (OverflowSubL 0 op1).
// NOTE(review): the zero operand is declared immI0 although OverflowSubL
// takes a long constant — verify this rule can actually match (immL0 may
// be what was intended).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13856 
// Overflow check for int multiply. SMULL produces the exact 64-bit
// product; it overflows 32 bits iff the upper half is not a pure sign
// extension of the lower half. Since no single instruction sets V for
// this, the sequence manufactures the V flag: compare against the
// sign-extended low word (NE => overflow), then turn NE into a cmpw
// 0x80000000, 1 which sets V, so downstream code can test VS/VC as usual.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the OverflowMulI feeds a branch directly: skip the
// V-flag manufacturing above and branch on NE/EQ instead. The predicate
// restricts this to overflow/no_overflow tests so the NE<->VS mapping
// in the encoding is valid.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Overflow check for long multiply: MUL gives the low 64 bits, SMULH the
// high 64; overflow iff the high half differs from the low half's sign
// (rscratch1 ASR #63). The tail manufactures the V flag exactly as in
// overflowMulI_reg above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused branch form of the long multiply overflow check; same NE<->VS
// mapping as overflowMulI_reg_branch.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13946 
13947 // ============================================================================
13948 // Compare Instructions
13949 
// Signed int compare, register-register: CMPW sets flags for op1 - op2.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate (single insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate. Costed at 2x because
// the encoding may need to materialize the constant into a scratch
// register first (see aarch64_enc_cmpw_imm).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// Unsigned int compare, register-register. The instruction is the same
// CMPW; only the consumer's condition codes (cmpOpU: LO/LS/HI/HS) differ.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may need a
// scratch-register move, hence the 2x cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14065 
// Signed long compare, register-register: 64-bit CMP.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the format text says "tst" but the encoding used is the
// add/sub compare-with-immediate (i.e. cmp $op1, #0); disassembly output
// of the format string is cosmetic only.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (2x cost: constant
// may have to be materialized first).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare, register-register. Same CMP instruction as the
// signed form; the unsigned interpretation lives in the cmpOpU consumer.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero (format "tst" is cosmetic; see
// compL_reg_immL0).
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate (2x cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14177 
// Pointer compare, register-register (unsigned: addresses have no sign).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare op1 against the null pointer constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14233 
14234 // FP comparisons
14235 //
14236 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14237 // using normal cmpOp. See declaration of rFlagsReg for details.
14238 
// Single-precision float compare: FCMPS transfers the FP comparison
// result (including the unordered case) into the integer NZCV flags.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 — uses the FCMP #0.0 form,
// avoiding the need to materialize zero in an FP register.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision variants of the FP compares above.

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0 (FCMP #0.0 form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14295 
// Three-way float compare (Java fcmpl semantics): dst = -1/0/+1.
// After FCMPS, CSINV yields 0 on EQ else -1, then CSNEG keeps -1 when
// LT (less OR unordered — unordered compares as "less" here) else
// negates -1 to +1. No branches are emitted; the `done` label is bound
// but never jumped to (harmless leftover).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare; same CSINV/CSNEG technique as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0 (FCMP #0.0 form of the above).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14403 
// CmpLTMask: dst = (p < q) ? -1 : 0 (all-ones mask when less).
// cmpw sets flags, csetw materializes 0/1 for LT, and the subtract from
// zr turns 1 into -1 (and leaves 0 as 0).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero collapses to one instruction: an arithmetic
// shift right by 31 replicates the sign bit, giving -1 iff src < 0.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14440 
14441 // ============================================================================
14442 // Max and Min
14443 
// Conditional-select helper for MinI. Has no match rule: it is only
// instantiated by the expand block in minI_rReg below, after flags have
// been set by compI_reg_reg.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI expands to a compare followed by a LT conditional select:
// dst = (src1 < src2) ? src1 : src2.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
// MaxI: same pattern as MinI with a GT select.

// Conditional-select helper for MaxI; only used by maxI_rReg's expand.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI expands to compare + GT conditional select:
// dst = (src1 > src2) ? src1 : src2.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
14502 
14503 // ============================================================================
14504 // Branch Instructions
14505 
14506 // Direct Branch.
// Direct Branch.
// Unconditional branch to a label (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
// Signed conditional branch: B.cond with the condition taken from cmpOp.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Unsigned conditional branch: same B.cond, but conditions come from
// cmpOpU (LO/LS/HI/HS) on an unsigned flags register.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14562 
14563 // Make use of CBZ and CBNZ.  These instructions, as well as being
14564 // shorter than (cmp; branch), have the additional benefit of not
14565 // killing the flags.
14566 
// Fused int eq/ne-zero test and branch: CBZW/CBNZW on the 32-bit register.
// Flags (cr) are matched but not modified — these instructions do not
// touch NZCV, which is the point of using them.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused long eq/ne-zero test and branch: CBZ/CBNZ (64-bit).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused pointer null/non-null test and branch: CBZ/CBNZ (64-bit).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused compressed-oop null/non-null test and branch (32-bit CBZW/CBNZW).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null test of a decoded narrow oop: the decoded pointer is null iff the
// narrow oop is zero, so test the 32-bit register and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare-with-zero branch. For an unsigned value x,
// x <= 0 (LS) is equivalent to x == 0 and x > 0 (HI) to x != 0, so
// lt/ge tests against zero also map onto CBZW/CBNZW.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare-with-zero branch; same LS/HI degeneration as the
// int form above, using the 64-bit CBZ/CBNZ.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14685 
14686 // Test bit and Branch
14687 
14688 // Patterns for short (< 32KiB) variants
// Sign test and branch on a long: (x < 0) / (x >= 0) is just bit 63, so
// use TBNZ/TBZ on the sign bit (LT => bit set => NE form of tbr).
// Short variant: target must be within +/-32KiB (tbz range).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test and branch on an int: test bit 31. Short variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch on a long: ((op1 & (1 << k)) ==/!= 0) maps
// to TBZ/TBNZ on bit k; the predicate requires op2 to be a power of two.
// Short variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch on an int; same power-of-two requirement.
// Short variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14754 
14755 // And far variants
// Far variant of cmpL_branch_sign: tbr(..., /*far*/true) lets the
// assembler invert the test around an unconditional branch when the
// target is out of tbz range. No ins_short_branch, so this is the
// first-pass form; Shorten_branches substitutes the short one.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14817 
14818 // Test bits
14819 
// Set flags from (op1 & op2) for a long, where op2 can be encoded as a
// 64-bit logical immediate; emits a single tst (ands zr).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14832 
// Set flags from (op1 & op2) for an int, where op2 can be encoded as a
// 32-bit logical immediate; emits a single tstw (32-bit ands wzr).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Fix: format said "tst" but the encoding emits the 32-bit form tstw
  // (matches sibling cmpI_and_reg below).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14845 
// Set flags from (op1 & op2) for a long with a register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14856 
// Set flags from (op1 & op2) for an int with a register mask (32-bit tstw).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14867 
14868 
// Conditional Far Branch
// Conditional Far Branch Unsigned
// TODO: fixme

// counted loop end branch near
// Conditional branch closing a counted loop; reuses the generic
// conditional-branch encoding (aarch64_enc_br_con).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Same as above but with an unsigned condition code / flags register.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14906 
14907 // counted loop end branch far
14908 // counted loop end branch far unsigned
14909 // TODO: fixme
14910 
14911 // ============================================================================
14912 // inlined locking and unlocking
14913 
14914 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
14915 %{
14916   match(Set cr (FastLock object box));
14917   effect(TEMP tmp, TEMP tmp2);
14918 
14919   // TODO
14920   // identify correct cost
14921   ins_cost(5 * INSN_COST);
14922   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
14923 
14924   ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
14925 
14926   ins_pipe(pipe_serial);
14927 %}
14928 
14929 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
14930 %{
14931   match(Set cr (FastUnlock object box));
14932   effect(TEMP tmp, TEMP tmp2);
14933 
14934   ins_cost(5 * INSN_COST);
14935   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
14936 
14937   ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
14938 
14939   ins_pipe(pipe_serial);
14940 %}
14941 
14942 
14943 // ============================================================================
14944 // Safepoint Instructions
14945 
14946 // TODO
14947 // provide a near and far version of this code
14948 
14949 instruct safePoint(rFlagsReg cr, iRegP poll)
14950 %{
14951   match(SafePoint poll);
14952   effect(KILL cr);
14953 
14954   format %{
14955     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
14956   %}
14957   ins_encode %{
14958     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
14959   %}
14960   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
14961 %}
14962 
14963 
14964 // ============================================================================
14965 // Procedure Call/Return Instructions
14966 
14967 // Call Java Static Instruction
14968 
14969 instruct CallStaticJavaDirect(method meth)
14970 %{
14971   match(CallStaticJava);
14972 
14973   effect(USE meth);
14974 
14975   ins_cost(CALL_COST);
14976 
14977   format %{ "call,static $meth \t// ==> " %}
14978 
14979   ins_encode( aarch64_enc_java_static_call(meth),
14980               aarch64_enc_call_epilog );
14981 
14982   ins_pipe(pipe_class_call);
14983 %}
14984 
14985 // TO HERE
14986 
14987 // Call Java Dynamic Instruction
14988 instruct CallDynamicJavaDirect(method meth)
14989 %{
14990   match(CallDynamicJava);
14991 
14992   effect(USE meth);
14993 
14994   ins_cost(CALL_COST);
14995 
14996   format %{ "CALL,dynamic $meth \t// ==> " %}
14997 
14998   ins_encode( aarch64_enc_java_dynamic_call(meth),
14999                aarch64_enc_call_epilog );
15000 
15001   ins_pipe(pipe_class_call);
15002 %}
15003 
15004 // Call Runtime Instruction
15005 
15006 instruct CallRuntimeDirect(method meth)
15007 %{
15008   match(CallRuntime);
15009 
15010   effect(USE meth);
15011 
15012   ins_cost(CALL_COST);
15013 
15014   format %{ "CALL, runtime $meth" %}
15015 
15016   ins_encode( aarch64_enc_java_to_runtime(meth) );
15017 
15018   ins_pipe(pipe_class_call);
15019 %}
15020 
15021 // Call Runtime Instruction
15022 
15023 instruct CallLeafDirect(method meth)
15024 %{
15025   match(CallLeaf);
15026 
15027   effect(USE meth);
15028 
15029   ins_cost(CALL_COST);
15030 
15031   format %{ "CALL, runtime leaf $meth" %}
15032 
15033   ins_encode( aarch64_enc_java_to_runtime(meth) );
15034 
15035   ins_pipe(pipe_class_call);
15036 %}
15037 
15038 // Call Runtime Instruction
15039 
15040 instruct CallLeafNoFPDirect(method meth)
15041 %{
15042   match(CallLeafNoFP);
15043 
15044   effect(USE meth);
15045 
15046   ins_cost(CALL_COST);
15047 
15048   format %{ "CALL, runtime leaf nofp $meth" %}
15049 
15050   ins_encode( aarch64_enc_java_to_runtime(meth) );
15051 
15052   ins_pipe(pipe_class_call);
15053 %}
15054 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Indirect jump used for exception forwarding; ex_oop (r0) carries the
// exception oop to the target.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15084 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size: purely a register-allocation artifact, emits nothing.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15115 
15116 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now.
// Emits a guaranteed-illegal instruction so execution traps here.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15145 
15146 // ============================================================================
15147 // Partial Subtype Check
15148 //
15149 // superklass array for an instance of the superklass.  Set a hidden
15150 // internal cache on a hit (cache is checked with exposed code in
15151 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15152 // encoding ALSO sets flags.
15153 
15154 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
15155 %{
15156   match(Set result (PartialSubtypeCheck sub super));
15157   effect(KILL cr, KILL temp);
15158 
15159   ins_cost(1100);  // slightly larger than the next version
15160   format %{ "partialSubtypeCheck $result, $sub, $super" %}
15161 
15162   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
15163 
15164   opcode(0x1); // Force zero of result reg on hit
15165 
15166   ins_pipe(pipe_class_memory);
15167 %}
15168 
15169 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
15170 %{
15171   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
15172   effect(KILL temp, KILL result);
15173 
15174   ins_cost(1100);  // slightly larger than the next version
15175   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
15176 
15177   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
15178 
15179   opcode(0x0); // Don't zero result reg on hit
15180 
15181   ins_pipe(pipe_class_memory);
15182 %}
15183 
// Compare two UTF-16 (UU) strings; result gets the usual
// negative/zero/positive comparison value.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Fix: KILL annotation listed only $tmp1 although $tmp2 is killed too.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15201 
// Compare two Latin-1 (LL) strings; result gets the usual
// negative/zero/positive comparison value.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Fix: KILL annotation listed only $tmp1 although $tmp2 is killed too.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15218 
// Mixed-encoding compare: str1 is UTF-16, str2 is Latin-1 (UL); uses
// three vector temps for the widening comparison loop.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15238 
// Mixed-encoding compare: str1 is Latin-1, str2 is UTF-16 (LU); uses
// three vector temps for the widening comparison loop.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      // Fix: added missing space after comma (matches the
                      // UL sibling's formatting).
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15258 
// String.indexOf with a variable-length needle, UTF-16 haystack and
// needle (UU).  The -1 constant count tells string_indexof that cnt2 is
// in a register, not a compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above for Latin-1 haystack and needle (LL).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above for UTF-16 haystack with Latin-1 needle (UL).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15321 
// String.indexOf with a small compile-time-constant needle length
// (immI_le_4), UTF-16/UTF-16 (UU).  The constant count is passed as
// icnt2 and zr stands in for the unused register operands.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above for Latin-1/Latin-1 (LL), constant needle length <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above for UL; note the tighter immI_1 operand (needle length == 1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15384 
// indexOf of a single char within a UTF-16 char[].
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15402 
// String equality for Latin-1 (LL) content.  The trailing constant 1/2
// passed to string_equals — presumably the element size in bytes — TODO
// confirm against MacroAssembler::string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// String equality for UTF-16 (UU) content.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15434 
// byte[] equality (LL encoding); trailing 1 selects byte elements.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fix: "ary2" was missing its '$' so the operand never printed.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  // Fix: closing %} above was mis-indented (4 spaces).
  ins_pipe(pipe_class_memory);
%}
15451 
// char[] equality (UU encoding); trailing 2 selects 16-bit elements.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fix: "ary2" was missing its '$' so the operand never printed.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15468 
// Test whether a byte[] contains any negative (>= 0x80) bytes.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15479 
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // Fix: format claimed "KILL R1, R2, R3, R4" but R4 is untouched; the
  // actual clobbers are src/dst/len plus the four vector temps (see effect).
  format %{ "String Compress $src,$dst -> $result    // KILL $src, $dst, $len, $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15498 
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // Fix: KILL annotation omitted $tmp3 and $tmp4, which are TEMPs and
  // clobbered by byte_array_inflate (see effect list).
  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15513 
// encode char[] to byte[] in ISO_8859_1
// result holds the number of characters actually encoded.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15532 
15533 // ============================================================================
15534 // This name is KNOWN by the ADLC and cannot be changed.
15535 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15536 // for this guy.
15537 instruct tlsLoadP(thread_RegP dst)
15538 %{
15539   match(Set dst (ThreadLocal));
15540 
15541   ins_cost(0);
15542 
15543   format %{ " -- \t// $dst=Thread::current(), empty" %}
15544 
15545   size(0);
15546 
15547   ins_encode( /*empty*/ );
15548 
15549   ins_pipe(pipe_class_empty);
15550 %}
15551 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores, selected by the vector node's memory size
// (4/8/16 bytes -> ldrs/ldrd/ldrq and strs/strd/strq).

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15619 
// Replicate a byte from a GP register into all lanes of a 64-bit vector.
// Also used for 4-byte vectors (length 4): the extra lanes are ignored.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate a byte from a GP register into all 16 lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate byte into all lanes of a 64-bit vector.
// The constant is masked to its low 8 bits before being broadcast.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate byte into all 16 lanes of a 128-bit vector.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15669 
// Replicate a short from a GP register into all lanes of a 64-bit vector.
// Also used for 2-short vectors (length 2): the extra lanes are ignored.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate a short from a GP register into all 8 lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate short into all lanes of a 64-bit vector.
// The constant is masked to its low 16 bits before being broadcast.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate short into all 8 lanes of a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15719 
// Replicate an int from a GP register into both lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate an int from a GP register into all 4 lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate int into both lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate int into all 4 lanes of a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Replicate a long from a GP register into both lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15779 
// Replicate zero into a 128-bit (2L) vector.
// NOTE(review): this matches ReplicateI rather than ReplicateL — presumably
// because C2 canonicalizes the all-zero 2L replicate to a ReplicateI of
// immI0; confirm against the matcher before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // Format fixed to match the encoding: the register is cleared with an
  // EOR of the destination with itself, not a MOVI.
  format %{ "eor  $dst, $dst, $dst\t# vector (2L zero)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15793 
// Replicate a float from an FP register into both lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Replicate a float from an FP register into all 4 lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Replicate a double from an FP register into both lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15832 
// ====================REDUCTION ARITHMETIC====================================

// Add-reduce two ints: dst = src1 + src2[0] + src2[1].
// Both lanes are moved into GP registers with umov, then summed with addw.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce four ints: dst = src1 + (sum of the four lanes of src2).
// Uses the SIMD across-lanes addv, then moves the scalar sum to a GP register.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15871 
// Multiply-reduce two ints: dst = src1 * src2[0] * src2[1].
// Lanes are extracted with umov and combined with scalar mul.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: dropped the stray trailing "\n\t" after the last format line,
  // which left a dangling continuation in PrintOptoAssembly output.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15890 
// Multiply-reduce four ints: dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
// First folds the high 64 bits of src2 onto the low half (ins + mulv T2S),
// then combines the two remaining lanes in GP registers.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Format fixed: the ins moves a D lane (matching the encoding below),
  // and the stray trailing "\n\t" after the final line is dropped.
  format %{ "ins   $tmp, D, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15915 
// Add-reduce two floats: dst = src1 + src2[0] + src2[1].
// The second lane is brought to position 0 with ins, then added with fadds.
// Lanes are accumulated one at a time to preserve strict FP ordering.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce four floats: dst = src1 + src2[0] + src2[1] + src2[2] + src2[3].
// Each lane is moved to position 0 with ins and accumulated with fadds,
// strictly in lane order (required for FP add-reduction semantics).
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15967 
// Multiply-reduce two floats: dst = src1 * src2[0] * src2[1].
// The second lane is brought to position 0 with ins, then multiplied in.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Format trailer fixed: this is a 2F multiply reduction ("mul reduction2f"),
  // not an add — the old text was a copy-paste from reduce_add4F.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15987 
// Multiply-reduce four floats:
// dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
// Each lane is moved to position 0 with ins and multiplied in, in lane order.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Format trailer fixed: "mul reduction4f", not "add reduction4f".
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16019 
// Add-reduce two doubles: dst = src1 + src2[0] + src2[1].
// The second D lane is moved to position 0 with ins, then added with faddd.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16039 
// Multiply-reduce two doubles: dst = src1 * src2[0] * src2[1].
// The second D lane is moved to position 0 with ins, then multiplied in.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Format trailer fixed: "mul reduction2d", not "add reduction2d".
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16059 
// Max-reduce two floats: dst = max(src1, src2[0], src2[1]).
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduce four floats using the across-lanes fmaxv, then fold in src1.
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduce two doubles: dst = max(src1, src2[0], src2[1]).
// (No across-lanes fmaxv exists for D lanes, so the high lane is
// extracted with ins and combined with scalar fmaxd.)
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce two floats: dst = min(src1, src2[0], src2[1]).
instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce four floats using the across-lanes fminv, then fold in src1.
instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $src2\n\t"
            "fmins $dst, $dst, $src1\t min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce two doubles: dst = min(src1, src2[0], src2[1]).
instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16151 
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Lane-wise integer add, 8 bytes (also covers 4-byte vectors).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lane-wise integer add, 16 bytes.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise short add, 4 halfwords (also covers 2-short vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lane-wise short add, 8 halfwords.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise int add, 2 words.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lane-wise int add, 4 words.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise long add, 2 doublewords.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise float add, 2 singles.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Lane-wise float add, 4 singles.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16283 
// Lane-wise double add, 2 doublewords.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Consistency fix: guard on the two-lane case like the sibling 2D/2L
  // rules (vsub2D, vadd2L, ...); the predicate was missing here.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16296 
// --------------------------------- SUB --------------------------------------

// Lane-wise integer subtract, 8 bytes (also covers 4-byte vectors).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lane-wise integer subtract, 16 bytes.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise short subtract, 4 halfwords (also covers 2-short vectors).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lane-wise short subtract, 8 halfwords.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise int subtract, 2 words.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lane-wise int subtract, 4 words.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise long subtract, 2 doublewords.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lane-wise float subtract, 2 singles.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Lane-wise float subtract, 4 singles.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Lane-wise double subtract, 2 doublewords.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16440 
// --------------------------------- MUL --------------------------------------
// Note: there is no MulVB rule — AArch64 NEON byte multiply is not used by C2,
// and MulVL has no single-instruction NEON form.

// Lane-wise short multiply, 4 halfwords (also covers 2-short vectors).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Lane-wise short multiply, 8 halfwords.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Lane-wise int multiply, 2 words.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Lane-wise int multiply, 4 words.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16499 
16500 instruct vmul2F(vecD dst, vecD src1, vecD src2)
16501 %{
16502   predicate(n->as_Vector()->length() == 2);
16503   match(Set dst (MulVF src1 src2));
16504   ins_cost(INSN_COST);
16505   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
16506   ins_encode %{
16507     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
16508             as_FloatRegister($src1$$reg),
16509             as_FloatRegister($src2$$reg));
16510   %}
16511   ins_pipe(vmuldiv_fp64);
16512 %}
16513 
16514 instruct vmul4F(vecX dst, vecX src1, vecX src2)
16515 %{
16516   predicate(n->as_Vector()->length() == 4);
16517   match(Set dst (MulVF src1 src2));
16518   ins_cost(INSN_COST);
16519   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
16520   ins_encode %{
16521     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
16522             as_FloatRegister($src1$$reg),
16523             as_FloatRegister($src2$$reg));
16524   %}
16525   ins_pipe(vmuldiv_fp128);
16526 %}
16527 
16528 instruct vmul2D(vecX dst, vecX src1, vecX src2)
16529 %{
16530   predicate(n->as_Vector()->length() == 2);
16531   match(Set dst (MulVD src1 src2));
16532   ins_cost(INSN_COST);
16533   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
16534   ins_encode %{
16535     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
16536             as_FloatRegister($src1$$reg),
16537             as_FloatRegister($src2$$reg));
16538   %}
16539   ins_pipe(vmuldiv_fp128);
16540 %}
16541 
16542 // --------------------------------- MLA --------------------------------------
16543 
// Integer multiply-accumulate: matches AddV*(dst, MulV*(src1, src2)) and
// emits mlav, which accumulates src1*src2 into dst (dst is read-modify-write).

// 2 or 4 x short in a D register (4H form used for both lengths).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 8 x short in a Q register.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// 2 x int in a D register.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 4 x int in a Q register.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16600 
// FP fused multiply-accumulate: dst + src1 * src2, matching the FmaV* ideal
// node. Guarded by UseFMA; fmla accumulates into dst (read-modify-write).

// dst + src1 * src2, 2 x float
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2, 4 x float
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2, 2 x double
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16642 
16643 // --------------------------------- MLS --------------------------------------
16644 
// Integer multiply-subtract: matches SubV*(dst, MulV*(src1, src2)) and emits
// mlsv, which subtracts src1*src2 from dst (dst is read-modify-write).
// Note: shares the vmla* pipeline classes.

// 2 or 4 x short in a D register (4H form used for both lengths).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 8 x short in a Q register.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// 2 x int in a D register.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 4 x int in a Q register.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16701 
// FP fused multiply-subtract: dst - src1 * src2. Two match rules cover both
// placements of the negation (NegV on either multiplicand); both reduce to a
// single fmls. Guarded by UseFMA; dst is read-modify-write.

// dst - src1 * src2, 2 x float
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2, 4 x float
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2, 2 x double
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16746 
16747 // --------------------------------- DIV --------------------------------------
16748 
// Vector FP divide: matches DivVF/DivVD and emits SIMD fdiv.

// 2 x float in a D register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// 4 x float in a Q register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// 2 x double in a Q register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16790 
16791 // --------------------------------- SQRT -------------------------------------
16792 
// Vector square root of 2 x double (SqrtVD). No ins_cost given, so the
// default instruction cost applies.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16804 
16805 // --------------------------------- ABS --------------------------------------
16806 
// Vector FP absolute value: matches AbsVF/AbsVD and emits SIMD fabs.

// 2 x float in a D register.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// 4 x float in a Q register.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// 2 x double in a Q register.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16845 
16846 // --------------------------------- NEG --------------------------------------
16847 
// Vector FP negation: matches NegVF/NegVD and emits SIMD fneg.

// 2 x float in a D register.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// 4 x float in a Q register.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// 2 x double in a Q register.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16886 
16887 // --------------------------------- AND --------------------------------------
16888 
// Bitwise AND on whole vectors. Lane type is irrelevant for logical ops, so
// the predicate keys on length_in_bytes, not lane count.

// 4- or 8-byte vectors in a D register (8B form covers both).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16-byte vectors in a Q register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16917 
16918 // --------------------------------- OR ---------------------------------------
16919 
// Bitwise OR of 4- or 8-byte vectors in a D register (8B form covers both).
// Fix: the format string previously printed "and" (copy-paste from vand8B);
// the emitted instruction is orr, matching vor16B's format.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16934 
// Bitwise OR of 16-byte vectors in a Q register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16948 
16949 // --------------------------------- XOR --------------------------------------
16950 
// Bitwise XOR on whole vectors; emits eor. Predicate keys on byte length.

// 4- or 8-byte vectors in a D register (8B form covers both).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16-byte vectors in a Q register.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16979 
16980 // ------------------------------ Shift ---------------------------------------
// Shift-count materialization: broadcast a scalar GP-register shift count to
// every byte lane (dup), so the variable-shift instructs below can use it.
// Matches both LShiftCntV and RShiftCntV.

// 8-byte (D) vector of the replicated count.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 16-byte (Q) vector of the replicated count.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17002 
// Byte-lane left shift by a vector shift count (sshl with positive counts).

// 4 or 8 x byte in a D register (8B form covers both).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16 x byte in a Q register.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17029 
17030 // Right shifts with vector shift count on aarch64 SIMD are implemented
17031 // as left shift by negative shift count.
17032 // There are two cases for vector shift count.
17033 //
17034 // Case 1: The vector shift count is from replication.
17035 //        |            |
17036 //    LoadVector  RShiftCntV
17037 //        |       /
17038 //     RShiftVI
// Note: in the inner loop multiple neg instructions are generated; they can
// be hoisted to the outer loop and merged into a single neg instruction.
17041 //
17042 // Case 2: The vector shift count is from loading.
17043 // This case isn't supported by middle-end now. But it's supported by
17044 // panama/vectorIntrinsics(JEP 338: Vector API).
17045 //        |            |
17046 //    LoadVector  LoadVector
17047 //        |       /
17048 //     RShiftVI
17049 //
17050 
// Byte-lane right shifts with a vector count: negate the count into tmp,
// then use sshl/ushl (left shift by a negative count is a right shift on
// AArch64 SIMD). See the explanatory comment above.

// Arithmetic right shift, 4 or 8 x byte in a D register.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Arithmetic right shift, 16 x byte in a Q register.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift, 4 or 8 x byte in a D register.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Logical right shift, 16 x byte in a Q register.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17120 
// Byte-lane shifts by an immediate. Java semantics require out-of-range
// counts (>= 8 for byte lanes) to be handled explicitly:
//  - left / logical-right by >= 8 produces zero, emitted as eor dst,src,src;
//  - arithmetic-right clamps the count to 7 (replicates the sign bit).

// Immediate left shift, 4 or 8 x byte (D register).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count out of range for byte lanes: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift, 16 x byte (Q register).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count out of range for byte lanes: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift, 4 or 8 x byte (D register).
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: shifting by >= 8 arithmetically is equivalent to shifting by 7.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift, 16 x byte (Q register).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: shifting by >= 8 arithmetically is equivalent to shifting by 7.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift, 4 or 8 x byte (D register).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count out of range for byte lanes: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift, 16 x byte (Q register).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count out of range for byte lanes: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17227 
// Short (16-bit) lane shifts by a vector count. Left shifts use sshl
// directly; right shifts negate the count into tmp first (left shift by a
// negative count is a right shift — see the comment above the byte variants).

// Left shift, 2 or 4 x short in a D register (4H form covers both).
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Left shift, 8 x short in a Q register.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift, 2 or 4 x short in a D register.
// negr uses the byte arrangement (T8B): the count was replicated to every
// byte lane by the shiftcnt instructs, so byte-wise negation is sufficient.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Arithmetic right shift, 8 x short in a Q register.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift, 2 or 4 x short in a D register.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Logical right shift, 8 x short in a Q register.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17324 
// Vector left shift by immediate, 2 or 4 short lanes in a D register.
// A count >= 16 shifts every bit out of a 16-bit lane, so the destination
// is zeroed with eor(dst, src, src) instead of encoding an illegal shl.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17344 
// Vector left shift by immediate, 8 short lanes in a Q register.
// Counts >= 16 zero the destination (all bits shifted out of 16-bit lanes).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17363 
// Vector arithmetic right shift by immediate, 2 or 4 short lanes.
// Counts >= 16 are clamped to 15: shifting a 16-bit lane right by 15
// already leaves every bit equal to the sign bit, matching the semantics
// of a larger shift.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17378 
// Vector arithmetic right shift by immediate, 8 short lanes in a Q register.
// Counts >= 16 are clamped to 15 (result is all sign bits either way).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17392 
// Vector logical right shift by immediate, 2 or 4 short lanes.
// A count >= 16 shifts every bit out of a 16-bit lane, so the destination
// is zeroed with eor(dst, src, src) instead of encoding an illegal ushr.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17412 
// Vector logical right shift by immediate, 8 short lanes in a Q register.
// Counts >= 16 zero the destination (all bits shifted out of 16-bit lanes).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17431 
// Vector left shift by register, 2 int lanes in a D register.
// sshl shifts left for positive per-lane counts, so no negation is needed.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17444 
// Vector left shift by register, 4 int lanes in a Q register.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17457 
// Vector arithmetic right shift by register, 2 int lanes.
// Right shift is implemented as negate (negr) followed by a signed left
// shift (sshl) with the negative count; tmp holds the negated counts.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17474 
// Vector arithmetic right shift by register, 4 int lanes in a Q register.
// Same negate-then-sshl idiom as vsra2I.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17491 
// Vector logical right shift by register, 2 int lanes.
// Negate the counts, then unsigned left shift (ushl) by the negative count.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17508 
// Vector logical right shift by register, 4 int lanes in a Q register.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17525 
// Vector left shift by immediate, 2 int lanes.
// No out-of-range handling here, unlike the 4H/8H forms: Java masks int
// shift counts to 0..31, so the constant is presumably already in range
// for a 32-bit lane — NOTE(review): confirm against the matcher.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17538 
// Vector left shift by immediate, 4 int lanes in a Q register.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17551 
// Vector arithmetic right shift by immediate, 2 int lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17564 
// Vector arithmetic right shift by immediate, 4 int lanes in a Q register.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17577 
// Vector logical right shift by immediate, 2 int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17590 
// Vector logical right shift by immediate, 4 int lanes in a Q register.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17603 
// Vector left shift by register, 2 long lanes in a Q register.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17616 
// Vector arithmetic right shift by register, 2 long lanes.
// Negate the per-lane counts (negr), then signed left shift (sshl) with
// the negative count to shift right; tmp holds the negated counts.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17633 
// Vector logical right shift by register, 2 long lanes.
// Negate the counts, then unsigned left shift (ushl) by the negative count.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17650 
// Vector left shift by immediate, 2 long lanes in a Q register.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17663 
// Vector arithmetic right shift by immediate, 2 long lanes in a Q register.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17676 
// Vector logical right shift by immediate, 2 long lanes in a Q register.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17689 
// Vector per-lane float max, 2 float lanes in a D register (NEON fmax).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17703 
// Vector per-lane float max, 4 float lanes in a Q register (NEON fmax).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17717 
// Vector per-lane double max, 2 double lanes in a Q register (NEON fmax).
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17731 
// Vector per-lane float min, 2 float lanes in a D register (NEON fmin).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17745 
// Vector per-lane float min, 4 float lanes in a Q register (NEON fmin).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17759 
// Vector per-lane double min, 2 double lanes in a Q register (NEON fmin).
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17773 
17774 //----------PEEPHOLE RULES-----------------------------------------------------
17775 // These must follow all instruction definitions as they use the names
17776 // defined in the instructions definitions.
17777 //
17778 // peepmatch ( root_instr_name [preceding_instruction]* );
17779 //
17780 // peepconstraint %{
17781 // (instruction_number.operand_name relational_op instruction_number.operand_name
17782 //  [, ...] );
17783 // // instruction numbers are zero-based using left to right order in peepmatch
17784 //
17785 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17786 // // provide an instruction_number.operand_name for each operand that appears
17787 // // in the replacement instruction's match rule
17788 //
17789 // ---------VM FLAGS---------------------------------------------------------
17790 //
17791 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17792 //
17793 // Each peephole rule is given an identifying number starting with zero and
17794 // increasing by one in the order seen by the parser.  An individual peephole
17795 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17796 // on the command-line.
17797 //
17798 // ---------CURRENT LIMITATIONS----------------------------------------------
17799 //
17800 // Only match adjacent instructions in same basic block
17801 // Only equality constraints
17802 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17803 // Only one replacement instruction
17804 //
17805 // ---------EXAMPLE----------------------------------------------------------
17806 //
17807 // // pertinent parts of existing instructions in architecture description
17808 // instruct movI(iRegINoSp dst, iRegI src)
17809 // %{
17810 //   match(Set dst (CopyI src));
17811 // %}
17812 //
17813 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17814 // %{
17815 //   match(Set dst (AddI dst src));
17816 //   effect(KILL cr);
17817 // %}
17818 //
17819 // // Change (inc mov) to lea
17820 // peephole %{
//   // increment preceded by register-register move
17822 //   peepmatch ( incI_iReg movI );
17823 //   // require that the destination register of the increment
17824 //   // match the destination register of the move
17825 //   peepconstraint ( 0.dst == 1.dst );
17826 //   // construct a replacement instruction that sets
17827 //   // the destination to ( move's source register + one )
17828 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17829 // %}
17830 //
17831 
17832 // Implementation no longer uses movX instructions since
17833 // machine-independent system no longer uses CopyX nodes.
17834 //
17835 // peephole
17836 // %{
17837 //   peepmatch (incI_iReg movI);
17838 //   peepconstraint (0.dst == 1.dst);
17839 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17840 // %}
17841 
17842 // peephole
17843 // %{
17844 //   peepmatch (decI_iReg movI);
17845 //   peepconstraint (0.dst == 1.dst);
17846 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17847 // %}
17848 
17849 // peephole
17850 // %{
17851 //   peepmatch (addI_iReg_imm movI);
17852 //   peepconstraint (0.dst == 1.dst);
17853 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17854 // %}
17855 
17856 // peephole
17857 // %{
17858 //   peepmatch (incL_iReg movL);
17859 //   peepconstraint (0.dst == 1.dst);
17860 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17861 // %}
17862 
17863 // peephole
17864 // %{
17865 //   peepmatch (decL_iReg movL);
17866 //   peepconstraint (0.dst == 1.dst);
17867 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17868 // %}
17869 
17870 // peephole
17871 // %{
17872 //   peepmatch (addL_iReg_imm movL);
17873 //   peepconstraint (0.dst == 1.dst);
17874 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17875 // %}
17876 
17877 // peephole
17878 // %{
17879 //   peepmatch (addP_iReg_imm movP);
17880 //   peepconstraint (0.dst == 1.dst);
17881 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17882 // %}
17883 
17884 // // Change load of spilled value to only a spill
17885 // instruct storeI(memory mem, iRegI src)
17886 // %{
17887 //   match(Set mem (StoreI mem src));
17888 // %}
17889 //
17890 // instruct loadI(iRegINoSp dst, memory mem)
17891 // %{
17892 //   match(Set dst (LoadI mem));
17893 // %}
17894 //
17895 
17896 //----------SMARTSPILL RULES---------------------------------------------------
17897 // These must follow all instruction definitions as they use the names
17898 // defined in the instructions definitions.
17899 
17900 // Local Variables:
17901 // mode: c++
17902 // End: