1 //
   2 // Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// r0-r7: caller-save (SOC, SOC) in both the Java and C calling
// conventions; these double as the integer argument registers.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined here: they stay invisible to
// the allocator so they can be used as scratch registers.
// r10-r18: caller-save (SOC, SOC) volatile scratch registers.
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: callee-save (SOE) under the C convention (second column),
// but treated as save-on-call (SOC, first column) for compiled Java
// code, since Java frames use no callee-save registers.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: no-save (NS) for Java use — reserved system registers,
// never allocated to Java values.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can store a vector of single or double precision floating-point
// values: up to 4 * 32 bit floats or 2 * 64 bit doubles. We currently
// only use the first float or double element of the vector.
 159 
// for Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). float
// registers v16-v31 are SOC as per the platform spec
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
alloc_class chunk0(
    // Allocation priority is highest-first (see comment above): the
    // volatile scratch registers come first, then the Java argument
    // registers, then the C-ABI callee-saved registers, and finally the
    // non-allocatable system registers.

    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
alloc_class chunk1(
    // Same highest-priority-first ordering as chunk0, applied to the
    // FP/SIMD registers: the v16-v31 scratch registers first, then the
    // FP argument registers v0-v7, then v8-v15 (C-ABI callee-saved).

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// (same two-slot-per-register encoding as double_reg)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (four 32-bit slots per register: V<n>, _H, _J, _K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots are listed, matching vectord_reg
// rather than vectorx_reg; confirm the _J/_K slots are deliberately
// omitted for these singleton classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls are ranked twice as expensive as a register op
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references (with their associated barriers) cost the most
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/barrierSetAssembler.hpp"
1000 #include "gc/shared/cardTable.hpp"
1001 #include "gc/shared/cardTableBarrierSet.hpp"
1002 #include "gc/shared/collectedHeap.hpp"
1003 #include "opto/addnode.hpp"
1004 
// Call-stub support queried by the generic backend; this platform
// emits no call trampoline stubs, so both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1022 
// Platform hooks for emitting the exception and deopt handler stubs
// and reporting their (worst case) sizes to the generic backend.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1039 
 // helper identifying CompareAndSwapX-style nodes (see the source
 // block below for the definition and the full rationale)
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1061 %}
1062 
1063 source %{
1064 
  // Optimization of volatile gets and puts
1066   // -------------------------------------
1067   //
1068   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1069   // use to implement volatile reads and writes. For a volatile read
1070   // we simply need
1071   //
1072   //   ldar<x>
1073   //
1074   // and for a volatile write we need
1075   //
1076   //   stlr<x>
1077   //
1078   // Alternatively, we can implement them by pairing a normal
1079   // load/store with a memory barrier. For a volatile read we need
1080   //
1081   //   ldr<x>
1082   //   dmb ishld
1083   //
1084   // for a volatile write
1085   //
1086   //   dmb ish
1087   //   str<x>
1088   //   dmb ish
1089   //
1090   // We can also use ldaxr and stlxr to implement compare and swap CAS
1091   // sequences. These are normally translated to an instruction
1092   // sequence like the following
1093   //
1094   //   dmb      ish
1095   // retry:
1096   //   ldxr<x>   rval raddr
1097   //   cmp       rval rold
1098   //   b.ne done
1099   //   stlxr<x>  rval, rnew, rold
1100   //   cbnz      rval retry
1101   // done:
1102   //   cset      r0, eq
1103   //   dmb ishld
1104   //
1105   // Note that the exclusive store is already using an stlxr
1106   // instruction. That is required to ensure visibility to other
1107   // threads of the exclusive write (assuming it succeeds) before that
1108   // of any subsequent writes.
1109   //
1110   // The following instruction sequence is an improvement on the above
1111   //
1112   // retry:
1113   //   ldaxr<x>  rval raddr
1114   //   cmp       rval rold
1115   //   b.ne done
1116   //   stlxr<x>  rval, rnew, rold
1117   //   cbnz      rval retry
1118   // done:
1119   //   cset      r0, eq
1120   //
1121   // We don't need the leading dmb ish since the stlxr guarantees
1122   // visibility of prior writes in the case that the swap is
1123   // successful. Crucially we don't have to worry about the case where
1124   // the swap is not successful since no valid program should be
1125   // relying on visibility of prior changes by the attempting thread
1126   // in the case where the CAS fails.
1127   //
1128   // Similarly, we don't need the trailing dmb ishld if we substitute
1129   // an ldaxr instruction since that will provide all the guarantees we
1130   // require regarding observation of changes made by other threads
1131   // before any change to the CAS address observed by the load.
1132   //
1133   // In order to generate the desired instruction sequence we need to
1134   // be able to identify specific 'signature' ideal graph node
1135   // sequences which i) occur as a translation of a volatile reads or
1136   // writes or CAS operations and ii) do not occur through any other
1137   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1139   // sequences to the desired machine code sequences. Selection of the
1140   // alternative rules can be implemented by predicates which identify
1141   // the relevant node sequences.
1142   //
1143   // The ideal graph generator translates a volatile read to the node
1144   // sequence
1145   //
1146   //   LoadX[mo_acquire]
1147   //   MemBarAcquire
1148   //
1149   // As a special case when using the compressed oops optimization we
1150   // may also see this variant
1151   //
1152   //   LoadN[mo_acquire]
1153   //   DecodeN
1154   //   MemBarAcquire
1155   //
1156   // A volatile write is translated to the node sequence
1157   //
1158   //   MemBarRelease
1159   //   StoreX[mo_release] {CardMark}-optional
1160   //   MemBarVolatile
1161   //
1162   // n.b. the above node patterns are generated with a strict
1163   // 'signature' configuration of input and output dependencies (see
1164   // the predicates below for exact details). The card mark may be as
1165   // simple as a few extra nodes or, in a few GC configurations, may
1166   // include more complex control flow between the leading and
1167   // trailing memory barriers. However, whatever the card mark
1168   // configuration these signatures are unique to translated volatile
1169   // reads/stores -- they will not appear as a result of any other
1170   // bytecode translation or inlining nor as a consequence of
1171   // optimizing transforms.
1172   //
1173   // We also want to catch inlined unsafe volatile gets and puts and
1174   // be able to implement them using either ldar<x>/stlr<x> or some
1175   // combination of ldr<x>/stlr<x> and dmb instructions.
1176   //
1177   // Inlined unsafe volatiles puts manifest as a minor variant of the
1178   // normal volatile put node sequence containing an extra cpuorder
1179   // membar
1180   //
1181   //   MemBarRelease
1182   //   MemBarCPUOrder
1183   //   StoreX[mo_release] {CardMark}-optional
1184   //   MemBarCPUOrder
1185   //   MemBarVolatile
1186   //
1187   // n.b. as an aside, a cpuorder membar is not itself subject to
1188   // matching and translation by adlc rules.  However, the rule
1189   // predicates need to detect its presence in order to correctly
1190   // select the desired adlc rules.
1191   //
1192   // Inlined unsafe volatile gets manifest as a slightly different
1193   // node sequence to a normal volatile get because of the
1194   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1197   // present
1198   //
1199   //   MemBarCPUOrder
1200   //        ||       \\
1201   //   MemBarCPUOrder LoadX[mo_acquire]
1202   //        ||            |
1203   //        ||       {DecodeN} optional
1204   //        ||       /
1205   //     MemBarAcquire
1206   //
1207   // In this case the acquire membar does not directly depend on the
1208   // load. However, we can be sure that the load is generated from an
1209   // inlined unsafe volatile get if we see it dependent on this unique
1210   // sequence of membar nodes. Similarly, given an acquire membar we
1211   // can know that it was added because of an inlined unsafe volatile
1212   // get if it is fed and feeds a cpuorder membar and if its feed
1213   // membar also feeds an acquiring load.
1214   //
1215   // Finally an inlined (Unsafe) CAS operation is translated to the
1216   // following ideal graph
1217   //
1218   //   MemBarRelease
1219   //   MemBarCPUOrder
1220   //   CompareAndSwapX {CardMark}-optional
1221   //   MemBarCPUOrder
1222   //   MemBarAcquire
1223   //
1224   // So, where we can identify these volatile read and write
1225   // signatures we can choose to plant either of the above two code
1226   // sequences. For a volatile read we can simply plant a normal
1227   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1228   // also choose to inhibit translation of the MemBarAcquire and
1229   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1230   //
1231   // When we recognise a volatile store signature we can choose to
1232   // plant at a dmb ish as a translation for the MemBarRelease, a
1233   // normal str<x> and then a dmb ish for the MemBarVolatile.
1234   // Alternatively, we can inhibit translation of the MemBarRelease
1235   // and MemBarVolatile and instead plant a simple stlr<x>
1236   // instruction.
1237   //
1238   // when we recognise a CAS signature we can choose to plant a dmb
1239   // ish as a translation for the MemBarRelease, the conventional
1240   // macro-instruction sequence for the CompareAndSwap node (which
1241   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1242   // Alternatively, we can elide generation of the dmb instructions
1243   // and plant the alternative CompareAndSwap macro-instruction
1244   // sequence (which uses ldaxr<x>).
1245   //
1246   // Of course, the above only applies when we see these signature
1247   // configurations. We still want to plant dmb instructions in any
1248   // other cases where we may see a MemBarAcquire, MemBarRelease or
1249   // MemBarVolatile. For example, at the end of a constructor which
1250   // writes final/volatile fields we will see a MemBarRelease
1251   // instruction and this needs a 'dmb ish' lest we risk the
1252   // constructed object being visible without making the
1253   // final/volatile field writes visible.
1254   //
1255   // n.b. the translation rules below which rely on detection of the
1256   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1257   // If we see anything other than the signature configurations we
1258   // always just translate the loads and stores to ldr<x> and str<x>
1259   // and translate acquire, release and volatile membars to the
1260   // relevant dmb instructions.
1261   //
1262 
1263   // is_CAS(int opcode, bool maybe_volatile)
1264   //
1265   // return true if opcode is one of the possible CompareAndSwapX
1266   // values otherwise false.
1267 
1268   bool is_CAS(int opcode, bool maybe_volatile)
1269   {
1270     switch(opcode) {
1271       // We handle these
1272     case Op_CompareAndSwapI:
1273     case Op_CompareAndSwapL:
1274     case Op_CompareAndSwapP:
1275     case Op_CompareAndSwapN:
1276     case Op_CompareAndSwapB:
1277     case Op_CompareAndSwapS:
1278     case Op_GetAndSetI:
1279     case Op_GetAndSetL:
1280     case Op_GetAndSetP:
1281     case Op_GetAndSetN:
1282     case Op_GetAndAddI:
1283     case Op_GetAndAddL:
1284 #if INCLUDE_SHENANDOAHGC
1285     case Op_ShenandoahCompareAndSwapP:
1286     case Op_ShenandoahCompareAndSwapN:
1287 #endif
1288       return true;
1289     case Op_CompareAndExchangeI:
1290     case Op_CompareAndExchangeN:
1291     case Op_CompareAndExchangeB:
1292     case Op_CompareAndExchangeS:
1293     case Op_CompareAndExchangeL:
1294     case Op_CompareAndExchangeP:
1295     case Op_WeakCompareAndSwapB:
1296     case Op_WeakCompareAndSwapS:
1297     case Op_WeakCompareAndSwapI:
1298     case Op_WeakCompareAndSwapL:
1299     case Op_WeakCompareAndSwapP:
1300     case Op_WeakCompareAndSwapN:
1301       return maybe_volatile;
1302     default:
1303       return false;
1304     }
1305   }
1306 
1307   // helper to determine the maximum number of Phi nodes we may need to
1308   // traverse when searching from a card mark membar for the merge mem
1309   // feeding a trailing membar or vice versa
1310 
1311 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1312 
// Returns true when the acquire membar is redundant because the
// preceding load (or CAS) will itself be emitted with acquire
// semantics (ldar<x> / ldaxr<x>).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // membar trailing a volatile load: the ldar<x> provides the
  // acquire semantics, so the membar can be elided
  if (mb->trailing_load()) {
    return true;
  }

  // membar trailing a CAS: elide it when the CAS will be emitted
  // with an acquiring ldaxr<x> (any CAS variant qualifies here)
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1336 
1337 bool needs_acquiring_load(const Node *n)
1338 {
1339   assert(n->is_Load(), "expecting a load");
1340   if (UseBarriersForVolatile) {
1341     // we use a normal load and a dmb
1342     return false;
1343   }
1344 
1345   LoadNode *ld = n->as_Load();
1346 
1347   return ld->is_acquire();
1348 }
1349 
1350 bool unnecessary_release(const Node *n)
1351 {
1352   assert((n->is_MemBar() &&
1353           n->Opcode() == Op_MemBarRelease),
1354          "expecting a release membar");
1355 
1356   if (UseBarriersForVolatile) {
1357     // we need to plant a dmb
1358     return false;
1359   }
1360 
1361   MemBarNode *barrier = n->as_MemBar();
1362   if (!barrier->leading()) {
1363     return false;
1364   } else {
1365     Node* trailing = barrier->trailing_membar();
1366     MemBarNode* trailing_mb = trailing->as_MemBar();
1367     assert(trailing_mb->trailing(), "Not a trailing membar?");
1368     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1369 
1370     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1371     if (mem->is_Store()) {
1372       assert(mem->as_Store()->is_release(), "");
1373       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1374       return true;
1375     } else {
1376       assert(mem->is_LoadStore(), "");
1377       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1378       return is_CAS(mem->Opcode(), true);
1379     }
1380   }
1381   return false;
1382 }
1383 
// Returns true when the volatile membar is redundant because it
// trails a store that will be emitted with release semantics
// (stlr<x>).
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // redundant only when this membar trails a releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // sanity check the leading/trailing pairing recorded on the membars
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1407 
1408 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1409 
1410 bool needs_releasing_store(const Node *n)
1411 {
1412   // assert n->is_Store();
1413   if (UseBarriersForVolatile) {
1414     // we use a normal store and dmb combination
1415     return false;
1416   }
1417 
1418   StoreNode *st = n->as_Store();
1419 
1420   return st->trailing_membar() != NULL;
1421 }
1422 
1423 // predicate controlling translation of CAS
1424 //
1425 // returns true if CAS needs to use an acquiring load otherwise false
1426 
1427 bool needs_acquiring_load_exclusive(const Node *n)
1428 {
1429   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1430   if (UseBarriersForVolatile) {
1431     return false;
1432   }
1433 
1434   LoadStoreNode* ldst = n->as_LoadStore();
1435   if (is_CAS(n->Opcode(), false)) {
1436     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1437   } else {
1438     return ldst->trailing_membar() != NULL;
1439   }
1440 
1441   // so we can just return true here
1442   return true;
1443 }
1444 
1445 // predicate controlling translation of StoreCM
1446 //
// returns true if the card write can be planted without a preceding
// StoreStore (dmb ishst) barrier, otherwise false
1449 
1450 bool unnecessary_storestore(const Node *storecm)
1451 {
1452   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1453 
1454   // we need to generate a dmb ishst between an object put and the
1455   // associated card mark when we are using CMS without conditional
1456   // card marking
1457 
1458   if (UseConcMarkSweepGC && !UseCondCardMark) {
1459     return false;
1460   }
1461 
1462   // a storestore is unnecesary in all other cases
1463 
1464   return true;
1465 }
1466 
1467 
1468 #define __ _masm.
1469 
// forward declarations for helper functions to convert register
// indices to register objects
1472 
1473 // the ad file has to provide implementations of certain methods
1474 // expected by the generic code
1475 //
1476 // REQUIRED FUNCTIONALITY
1477 
1478 //=============================================================================
1479 
1480 // !!!!! Special hack to get all types of calls to specify the byte offset
1481 //       from the start of the call to the point where the return address
1482 //       will point.
1483 
1484 int MachCallStaticJavaNode::ret_addr_offset()
1485 {
1486   // call should be a simple bl
1487   int off = 4;
1488   return off;
1489 }
1490 
1491 int MachCallDynamicJavaNode::ret_addr_offset()
1492 {
1493   return 16; // movz, movk, movk, bl
1494 }
1495 
1496 int MachCallRuntimeNode::ret_addr_offset() {
1497   // for generated stubs the call will be
1498   //   bl(addr)
1499   // or with far branches
1500   //   bl(trampoline_stub)
1501   // for real runtime callouts it will be six instructions
1502   // see aarch64_enc_java_to_runtime
1503   //   adr(rscratch2, retaddr)
1504   //   lea(rscratch1, RuntimeAddress(addr)
1505   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1506   //   blr(rscratch1)
1507   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1508   if (cb) {
1509     return 1 * NativeInstruction::instruction_size;
1510   } else {
1511     return 6 * NativeInstruction::instruction_size;
1512   }
1513 }
1514 
1515 // Indicate if the safepoint node needs the polling page as an input
1516 
1517 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1519 // instruction itself. so we cannot plant a mov of the safepoint poll
1520 // address followed by a load. setting this to true means the mov is
1521 // scheduled as a prior instruction. that's better for scheduling
1522 // anyway.
1523 
// returns true: the polling page address is materialized into a
// register by a separate, schedulable instruction (see comment above)
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1528 
1529 //=============================================================================
1530 
#ifndef PRODUCT
// debug printout for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a brk #0 instruction, which traps into the debugger
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // let the generic code compute the size from the emitted length
  return MachNode::size(ra_);
}
1545 
1546 //=============================================================================
1547 
#ifndef PRODUCT
  // debug printout showing how many padding bytes this nop covers
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as alignment padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // total size in bytes of the emitted padding
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1564 
1565 //=============================================================================
1566 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1567 
1568 int Compile::ConstantTable::calculate_table_base_offset() const {
1569   return 0;  // absolute addressing, no offset
1570 }
1571 
1572 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1573 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1574   ShouldNotReachHere();
1575 }
1576 
1577 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
1578   // Empty encoding
1579 }
1580 
1581 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1582   return 0;
1583 }
1584 
1585 #ifndef PRODUCT
1586 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1587   st->print("-- \t// MachConstantBaseNode (empty encoding)");
1588 }
1589 #endif
1590 
#ifndef PRODUCT
// debug listing of the prolog instruction sequence
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames fit the stp immediate offset range; larger frames
  // move the sp adjustment into rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1612 
// emit the method prolog: patchable nop, optional stack bang, and
// the frame build (delegated to MacroAssembler::build_frame)
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack if the frame is large enough to require it
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // the prolog emits no relocatable values
  return 0;
}
1655 
1656 //=============================================================================
1657 
1658 #ifndef PRODUCT
1659 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1660   Compile* C = ra_->C;
1661   int framesize = C->frame_slots() << LogBytesPerInt;
1662 
1663   st->print("# pop frame %d\n\t",framesize);
1664 
1665   if (framesize == 0) {
1666     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1667   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
1668     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
1669     st->print("add  sp, sp, #%d\n\t", framesize);
1670   } else {
1671     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
1672     st->print("add  sp, sp, rscratch1\n\t");
1673     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1674   }
1675 
1676   if (do_polling() && C->is_method_compilation()) {
1677     st->print("# touch polling page\n\t");
1678     st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
1679     st->print("ldr zr, [rscratch1]");
1680   }
1681 }
1682 #endif
1683 
1684 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1685   Compile* C = ra_->C;
1686   MacroAssembler _masm(&cbuf);
1687   int framesize = C->frame_slots() << LogBytesPerInt;
1688 
1689   __ remove_frame(framesize);
1690 
1691   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
1692     __ reserved_stack_check();
1693   }
1694 
1695   if (do_polling() && C->is_method_compilation()) {
1696     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
1697   }
1698 }
1699 
1700 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1701   // Variable size. Determine dynamically.
1702   return MachNode::size(ra_);
1703 }
1704 
1705 int MachEpilogNode::reloc() const {
1706   // Return number of relocatable values contained in this instruction.
1707   return 1; // 1 for polling page.
1708 }
1709 
1710 const Pipeline * MachEpilogNode::pipeline() const {
1711   return MachNode::pipeline_class();
1712 }
1713 
1714 // This method seems to be obsolete. It is declared in machnode.hpp
1715 // and defined in all *.ad files, but it is never called. Should we
1716 // get rid of it?
1717 int MachEpilogNode::safepoint_offset() const {
1718   assert(do_polling(), "no return for this epilog node");
1719   return 4;
1720 }
1721 
1722 //=============================================================================
1723 
1724 // Figure out which register class each belongs in: rc_int, rc_float or
1725 // rc_stack.
1726 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1727 
1728 static enum RC rc_class(OptoReg::Name reg) {
1729 
1730   if (reg == OptoReg::Bad) {
1731     return rc_bad;
1732   }
1733 
1734   // we have 30 int registers * 2 halves
1735   // (rscratch1 and rscratch2 are omitted)
1736   int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
1737 
1738   if (reg < slots_of_int_registers) {
1739     return rc_int;
1740   }
1741 
1742   // we have 32 float register * 4 halves
1743   if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
1744     return rc_float;
1745   }
1746 
1747   // Between float regs & stack is the flags regs.
1748   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1749 
1750   return rc_stack;
1751 }
1752 
1753 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1754   Compile* C = ra_->C;
1755 
1756   // Get registers to move.
1757   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1758   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1759   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1760   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1761 
1762   enum RC src_hi_rc = rc_class(src_hi);
1763   enum RC src_lo_rc = rc_class(src_lo);
1764   enum RC dst_hi_rc = rc_class(dst_hi);
1765   enum RC dst_lo_rc = rc_class(dst_lo);
1766 
1767   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1768 
1769   if (src_hi != OptoReg::Bad) {
1770     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1771            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1772            "expected aligned-adjacent pairs");
1773   }
1774 
1775   if (src_lo == dst_lo && src_hi == dst_hi) {
1776     return 0;            // Self copy, no move.
1777   }
1778 
1779   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1780               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1781   int src_offset = ra_->reg2offset(src_lo);
1782   int dst_offset = ra_->reg2offset(dst_lo);
1783 
1784   if (bottom_type()->isa_vect() != NULL) {
1785     uint ireg = ideal_reg();
1786     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1787     if (cbuf) {
1788       MacroAssembler _masm(cbuf);
1789       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1790       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1791         // stack->stack
1792         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1793         if (ireg == Op_VecD) {
1794           __ unspill(rscratch1, true, src_offset);
1795           __ spill(rscratch1, true, dst_offset);
1796         } else {
1797           __ spill_copy128(src_offset, dst_offset);
1798         }
1799       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1800         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1801                ireg == Op_VecD ? __ T8B : __ T16B,
1802                as_FloatRegister(Matcher::_regEncode[src_lo]));
1803       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1804         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1805                        ireg == Op_VecD ? __ D : __ Q,
1806                        ra_->reg2offset(dst_lo));
1807       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1808         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1809                        ireg == Op_VecD ? __ D : __ Q,
1810                        ra_->reg2offset(src_lo));
1811       } else {
1812         ShouldNotReachHere();
1813       }
1814     }
1815   } else if (cbuf) {
1816     MacroAssembler _masm(cbuf);
1817     switch (src_lo_rc) {
1818     case rc_int:
1819       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1820         if (is64) {
1821             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1822                    as_Register(Matcher::_regEncode[src_lo]));
1823         } else {
1824             MacroAssembler _masm(cbuf);
1825             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1826                     as_Register(Matcher::_regEncode[src_lo]));
1827         }
1828       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1829         if (is64) {
1830             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1831                      as_Register(Matcher::_regEncode[src_lo]));
1832         } else {
1833             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1834                      as_Register(Matcher::_regEncode[src_lo]));
1835         }
1836       } else {                    // gpr --> stack spill
1837         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1838         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1839       }
1840       break;
1841     case rc_float:
1842       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1843         if (is64) {
1844             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1845                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1846         } else {
1847             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1848                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1849         }
1850       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1851           if (cbuf) {
1852             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1853                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1854         } else {
1855             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1856                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1857         }
1858       } else {                    // fpr --> stack spill
1859         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1860         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1861                  is64 ? __ D : __ S, dst_offset);
1862       }
1863       break;
1864     case rc_stack:
1865       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1866         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1867       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1868         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1869                    is64 ? __ D : __ S, src_offset);
1870       } else {                    // stack --> stack copy
1871         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1872         __ unspill(rscratch1, is64, src_offset);
1873         __ spill(rscratch1, is64, dst_offset);
1874       }
1875       break;
1876     default:
1877       assert(false, "bad rc_class for spill");
1878       ShouldNotReachHere();
1879     }
1880   }
1881 
1882   if (st) {
1883     st->print("spill ");
1884     if (src_lo_rc == rc_stack) {
1885       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1886     } else {
1887       st->print("%s -> ", Matcher::regName[src_lo]);
1888     }
1889     if (dst_lo_rc == rc_stack) {
1890       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1891     } else {
1892       st->print("%s", Matcher::regName[dst_lo]);
1893     }
1894     if (bottom_type()->isa_vect() != NULL) {
1895       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1896     } else {
1897       st->print("\t# spill size = %d", is64 ? 64:32);
1898     }
1899   }
1900 
1901   return 0;
1902 
1903 }
1904 
1905 #ifndef PRODUCT
1906 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1907   if (!ra_)
1908     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1909   else
1910     implementation(NULL, ra_, false, st);
1911 }
1912 #endif
1913 
// Emit the spill/copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1917 
// Size in bytes of the emitted spill/copy, measured generically by the
// shared MachNode sizing machinery.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1921 
1922 //=============================================================================
1923 
1924 #ifndef PRODUCT
1925 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1926   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1927   int reg = ra_->get_reg_first(this);
1928   st->print("add %s, rsp, #%d]\t# box lock",
1929             Matcher::regName[reg], offset);
1930 }
1931 #endif
1932 
// Materialize the address of the lock box slot (sp + offset) into the
// allocated register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
1943 
1944 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1945   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
1946   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1947 
1948   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
1949     return NativeInstruction::instruction_size;
1950   } else {
1951     return 2 * NativeInstruction::instruction_size;
1952   }
1953 }
1954 
1955 //=============================================================================
1956 
1957 #ifndef PRODUCT
// Debug listing of the unverified entry point: load the receiver's klass,
// compare against the inline-cache klass, branch to the miss stub on
// mismatch.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
1972 #endif
1973 
// Emit the inline-cache check at the unverified entry point: compare the
// receiver's klass (loaded from j_rarg0) against the expected klass and
// jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1987 
// Size in bytes of the unverified entry point code.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1992 
1993 // REQUIRED EMIT CODE
1994 
1995 //=============================================================================
1996 
1997 // Emit exception handler code.
// Emit exception handler code: a far jump to the shared exception blob.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2016 
2017 // Emit deopt handler code.
// Emit deopt handler code: capture the return address in lr, then far-jump
// to the deopt blob's unpack entry.  Returns the handler's offset in the
// stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr must point at the instruction following this adr so the deopt
  // machinery can identify the deopt site.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2037 
2038 // REQUIRED MATCHER CODE
2039 
2040 //=============================================================================
2041 
2042 const bool Matcher::match_rule_supported(int opcode) {
2043 
2044   switch (opcode) {
2045   default:
2046     break;
2047   }
2048 
2049   if (!has_match_rule(opcode)) {
2050     return false;
2051   }
2052 
2053   return true;  // Per default match rules are supported.
2054 }
2055 
2056 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2057 
2058   // TODO
2059   // identify extra cases that we might want to provide match rules for
2060   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2061   bool ret_value = match_rule_supported(opcode);
2062   // Add rules here.
2063 
2064   return ret_value;  // Per default match rules are supported.
2065 }
2066 
// No predicated (masked) vector support on this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2070 
// Use the default FP register pressure threshold unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2074 
// Not used on AArch64 (no x87-style FPU stack); deliberately unimplemented.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2080 
2081 // Is this branch offset short enough that a short branch can be used?
2082 //
2083 // NOTE: If the platform does not provide any short branch variants, then
2084 //       this method should return false for offset 0.
2085 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2086   // The passed offset is relative to address of the branch.
2087 
2088   return (-32768 <= offset && offset < 32768);
2089 }
2090 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2096 
2097 // true just means we have fast l2f conversion
// true just means we have fast l2f conversion hardware support.
const bool Matcher::convL2FSupported(void) {
  return true;
}
2101 
2102 // Vector width in bytes.
2103 const int Matcher::vector_width_in_bytes(BasicType bt) {
2104   int size = MIN2(16,(int)MaxVectorSize);
2105   // Minimum 2 values in vector
2106   if (size < 2*type2aelembytes(bt)) size = 0;
2107   // But never < 4
2108   if (size < 4) size = 0;
2109   return size;
2110 }
2111 
2112 // Limits on vector size (number of elements) loaded into vector.
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2116 const int Matcher::min_vector_size(const BasicType bt) {
2117 //  For the moment limit the vector size to 8 bytes
2118     int size = 8 / type2aelembytes(bt);
2119     if (size < 2) size = 2;
2120     return size;
2121 }
2122 
2123 // Vector ideal reg.
2124 const uint Matcher::vector_ideal_reg(int len) {
2125   switch(len) {
2126     case  8: return Op_VecD;
2127     case 16: return Op_VecX;
2128   }
2129   ShouldNotReachHere();
2130   return 0;
2131 }
2132 
2133 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2134   switch(size) {
2135     case  8: return Op_VecD;
2136     case 16: return Op_VecX;
2137   }
2138   ShouldNotReachHere();
2139   return 0;
2140 }
2141 
2142 // AES support not yet implemented
// AES support not yet implemented, so the original key is never needed.
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2146 
2147 // aarch64 supports misaligned vectors store/load.
// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
2151 
2152 // false => size gets scaled to BytesPerLong, ok.
2153 const bool Matcher::init_array_count_is_in_bytes = false;
2154 
2155 // Use conditional move (CMOVL)
// Use conditional move (CMOVL): extra cost of a long cmove over an int one.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
2160 
// Extra cost of a float cmove over an int one.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2165 
2166 // Does the CPU require late expand (see block.cpp for description of late expand)?
2167 const bool Matcher::require_postalloc_expand = false;
2168 
2169 // Do we need to mask the count passed to shift instructions or does
2170 // the cpu only look at the lower 5/6 bits anyway?
2171 const bool Matcher::need_masked_shift_count = false;
2172 
2173 // This affects two different things:
2174 //  - how Decode nodes are matched
2175 //  - how ImplicitNullCheck opportunities are recognized
2176 // If true, the matcher will try to remove all Decodes and match them
2177 // (as operands) into nodes. NullChecks are not prepared to deal with
2178 // Decodes by final_graph_reshaping().
2179 // If false, final_graph_reshaping() forces the decode behind the Cmp
2180 // for a NullCheck. The matcher matches the Decode node into a register.
2181 // Implicit_null_check optimization moves the Decode along with the
2182 // memory operation back up before the NullCheck.
// Match narrow oop Decodes as addressing-mode operands only when no shift
// is needed to decode them.
bool Matcher::narrow_oop_use_complex_address() {
  return Universe::narrow_oop_shift() == 0;
}
2186 
// Never match narrow klass Decodes as addressing-mode operands.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2192 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}
2197 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2202 
2203 // Is it better to copy float constants, or load them directly from
2204 // memory?  Intel can load a float constant from a direct address,
2205 // requiring no extra registers.  Most RISCs will have to materialize
2206 // an address into a register first, so they would do better to copy
2207 // the constant from stack.
2208 const bool Matcher::rematerialize_float_constants = false;
2209 
2210 // If CPU can load and store mis-aligned doubles directly then no
2211 // fixup is needed.  Else we split the double into 2 integer pieces
2212 // and move it piece-by-piece.  Only happens when passing doubles into
2213 // C code as the Java calling convention forces doubles to be aligned.
2214 const bool Matcher::misaligned_doubles_ok = true;
2215 
// Not used on AArch64: implicit null checks require no per-node fixup
// here, so this hook must never be reached.  (The previous "No-op on
// amd64" comment was inherited from the x86 port and was misleading —
// the body traps via Unimplemented rather than doing nothing.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2220 
2221 // Advertise here if the CPU requires explicit rounding operations to
2222 // implement the UseStrictFP mode.
2223 const bool Matcher::strict_fp_requires_explicit_rounding = false;
2224 
2225 // Are floats converted to double when stored to stack during
2226 // deoptimization?
2227 bool Matcher::float_in_double() { return false; }
2228 
2229 // Do ints take an entire long register or just half?
2230 // The relevant question is how the int is callee-saved:
2231 // the whole long is written but de-opt'ing will have to extract
2232 // the relevant 32 bits.
2233 const bool Matcher::int_in_long = true;
2234 
2235 // Return whether or not this register is ever used as an argument.
2236 // This function is used on startup to build the trampoline stubs in
2237 // generateOptoStub.  Registers not mentioned will be killed by the VM
2238 // call in the trampoline, and arguments in those registers not be
2239 // available to the callee.
2240 bool Matcher::can_be_java_arg(int reg)
2241 {
2242   return
2243     reg ==  R0_num || reg == R0_H_num ||
2244     reg ==  R1_num || reg == R1_H_num ||
2245     reg ==  R2_num || reg == R2_H_num ||
2246     reg ==  R3_num || reg == R3_H_num ||
2247     reg ==  R4_num || reg == R4_H_num ||
2248     reg ==  R5_num || reg == R5_H_num ||
2249     reg ==  R6_num || reg == R6_H_num ||
2250     reg ==  R7_num || reg == R7_H_num ||
2251     reg ==  V0_num || reg == V0_H_num ||
2252     reg ==  V1_num || reg == V1_H_num ||
2253     reg ==  V2_num || reg == V2_H_num ||
2254     reg ==  V3_num || reg == V3_H_num ||
2255     reg ==  V4_num || reg == V4_H_num ||
2256     reg ==  V5_num || reg == V5_H_num ||
2257     reg ==  V6_num || reg == V6_H_num ||
2258     reg ==  V7_num || reg == V7_H_num;
2259 }
2260 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2265 
// Never use a hand-written assembler sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2269 
// Register for DIVI projection of divmodI.  There is no combined divmodI
// node on this platform, so this must never be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2274 
2275 // Register for MODI projection of divmodI.
// No combined divmodI node on this platform; must never be requested.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2280 
2281 // Register for DIVL projection of divmodL.
// No combined divmodL node on this platform; must never be requested.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2286 
2287 // Register for MODL projection of divmodL.
// No combined divmodL node on this platform; must never be requested.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2292 
// Register mask for the SP saved around a method-handle invoke: the frame
// pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2296 
2297 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2298   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2299     Node* u = addp->fast_out(i);
2300     if (u->is_Mem()) {
2301       int opsize = u->as_Mem()->memory_size();
2302       assert(opsize > 0, "unexpected memory operand size");
2303       if (u->as_Mem()->memory_size() != (1<<shift)) {
2304         return false;
2305       }
2306     }
2307   }
2308   return true;
2309 }
2310 
2311 const bool Matcher::convi2l_type_required = false;
2312 
2313 // Should the Matcher clone shifts on addressing modes, expecting them
2314 // to be subsumed into complex addressing expressions or compute them
2315 // into registers?
// Decide whether the pieces of an AddP address expression should be cloned
// into each memory user (so the matcher can fold them into an addressing
// mode) rather than computed once into a register.  Marks consumed nodes
// in address_visited and pushes the remaining inputs on the match stack.
// Returns true when the address was handled here.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base+offset shapes are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL (ConvI2L x) con) or (LShiftL y con) — fold
  // the shift (and optional i2l) into a scaled-index addressing mode,
  // provided every memory use matches the scale.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) — fold the sign extension into
  // the addressing mode (sxtw form).
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2353 
// Platform hook for reshaping AddP address expressions before matching;
// nothing to do on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2356 
2357 
// Emit a volatile (acquire/release) load or store.  These instruction
// forms take a base register only, so the guarantees enforce that the
// operand has no index, displacement, or scale.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2366 
// Pointer-to-member types for the MacroAssembler load/store routines used
// by the loadStore() helpers and memory-access encodings below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2371 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: base + displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Indexed addressing never carries a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2402 
2403   static void loadStore(MacroAssembler masm, mem_float_insn insn,
2404                          FloatRegister reg, int opcode,
2405                          Register base, int index, int size, int disp)
2406   {
2407     Address::extend scale;
2408 
2409     switch (opcode) {
2410     case INDINDEXSCALEDI2L:
2411     case INDINDEXSCALEDI2LN:
2412       scale = Address::sxtw(size);
2413       break;
2414     default:
2415       scale = Address::lsl(size);
2416     }
2417 
2418      if (index == -1) {
2419       (masm.*insn)(reg, Address(base, disp));
2420     } else {
2421       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2422       (masm.*insn)(reg, Address(base, as_Register(index), scale));
2423     }
2424   }
2425 
2426   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2427                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2428                          int opcode, Register base, int index, int size, int disp)
2429   {
2430     if (index == -1) {
2431       (masm.*insn)(reg, T, Address(base, disp));
2432     } else {
2433       assert(disp == 0, "unsupported address mode");
2434       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2435     }
2436   }
2437 
2438 %}
2439 
2440 
2441 
2442 //----------ENCODING BLOCK-----------------------------------------------------
2443 // This block specifies the encoding classes used by the compiler to
2444 // output byte streams.  Encoding classes are parameterized macros
2445 // used by Machine Instruction Nodes in order to generate the bit
2446 // encoding of the instruction.  Operands specify their base encoding
2447 // interface with the interface keyword.  There are currently
2448 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2449 // COND_INTER.  REG_INTER causes an operand to generate a function
2450 // which returns its register number when queried.  CONST_INTER causes
2451 // an operand to generate a function which returns the value of the
2452 // constant when queried.  MEMORY_INTER causes an operand to generate
2453 // four functions which return the Base Register, the Index Register,
2454 // the Scale Value, and the Offset Value of the operand when queried.
2455 // COND_INTER causes an operand to generate six functions which return
2456 // the encoding code (ie - encoding bits for the instruction)
2457 // associated with each basic boolean condition for a conditional
2458 // instruction.
2459 //
2460 // Instructions specify two basic values for encoding.  Again, a
2461 // function is available to check if the constant displacement is an
2462 // oop. They use the ins_encode keyword to specify their encoding
2463 // classes (which must be a sequence of enc_class names, and their
2464 // parameters, specified in the encoding block), and they use the
2465 // opcode keyword to specify, in order, their primary, secondary, and
2466 // tertiary opcode.  Only the opcode sections which a particular
2467 // instruction needs for encoding need to be specified.
2468 encode %{
2469   // Build emit functions for each basic byte or larger field in the
2470   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2471   // from C++ code in the enc_class source block.  Emit functions will
2472   // live in the main source block for now.  In future, we can
2473   // generalize this by adding a syntax that specifies the sizes of
2474   // fields in an order, so that the adlc can build the emit functions
2475   // automagically
2476 
2477   // catch all for unimplemented encodings
  // Emits a trap so that unfinished encodings fail loudly at runtime.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2482 
2483   // BEGIN Non-volatile memory access
2484 
  // Load byte, sign-extended to 32 bits, into an int register.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2490 
  // Load byte, sign-extended to 64 bits, into an int register.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2496 
  // Load byte, zero-extended, into an int register.
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2502 
  // Load byte, zero-extended, into a long register.
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2508 
  // Load halfword, sign-extended to 32 bits, into an int register.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2514 
  // Load halfword, sign-extended to 64 bits, into an int register.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2520 
  // Load halfword, zero-extended, into an int register.
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2526 
  // Load halfword, zero-extended, into a long register.
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2532 
  // Load 32-bit word into an int register.
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2538 
  // Load 32-bit word, zero-extended, into a long register.
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2544 
  // Load 32-bit word, sign-extended to 64 bits, into a long register.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2550 
  // Load 64-bit doubleword into a long register.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2556 
  // Load 32-bit float into an FP (S) register.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2562 
  // Load 64-bit double into an FP (D) register.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2568 
2569   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
2570     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2571     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
2572        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2573   %}
2574 
2575   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
2576     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2577     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
2578        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2579   %}
2580 
2581   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
2582     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2583     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
2584        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2585   %}
2586 
  // Non-volatile store encodings.  The *0 variants store the zero
  // register (zr) to implement a store of constant zero without
  // materializing an immediate.

  // Store the low byte of an int register.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero byte.
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero byte preceded by a StoreStore barrier, so earlier
  // stores are ordered before this one.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store the low halfword of an int register.
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero halfword.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 32-bit int register.
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 32-bit zero.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 64-bit long register.  If the source is the stack pointer
  // it is first copied to rscratch2, since sp cannot be used directly
  // as a store data register on AArch64.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 64-bit zero.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 32-bit float register.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 64-bit double register.
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores; S = 32-bit, D = 64-bit, Q = 128-bit SIMD access.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2679 
2680   // END Non-volatile memory access
2681 
2682   // volatile loads and stores
2683 
  // Store-release encodings (stlr*).  MOV_VOLATILE resolves the memory
  // operand (using rscratch1 for address formation where needed) and
  // emits the named release-store instruction.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Load-acquire encodings (ldar*).  There is no sign-extending
  // acquire load, so the signed variants issue the plain acquire load
  // and then sign-extend the destination register.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended.
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended.
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire word.
  // NOTE(review): same enc_class name as the iRegL variant below;
  // presumably ADLC distinguishes them by operand signature -- confirm.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP load-acquire: acquire-load into rscratch1, then move the bits
  // into the FP destination register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2774 
2775   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2776     Register src_reg = as_Register($src$$reg);
2777     // we sometimes get asked to store the stack pointer into the
2778     // current thread -- we cannot do that directly on AArch64
2779     if (src_reg == r31_sp) {
2780         MacroAssembler _masm(&cbuf);
2781       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2782       __ mov(rscratch2, sp);
2783       src_reg = rscratch2;
2784     }
2785     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2786                  rscratch1, stlr);
2787   %}
2788 
  // FP store-release: move the FP bits into rscratch2, then
  // release-store rscratch2.  The inner scope limits the lifetime of
  // this _masm; MOV_VOLATILE presumably instantiates its own -- confirm.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2808 
2809   // synchronized read/update encodings
2810 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so any index/displacement is first folded into
  // rscratch1 with lea before the exclusive load.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + displacement: form the address in rscratch1.
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale).
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale), formed in two steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
2839 
  // Store-release-exclusive of a 64-bit value.  Mirrors the address
  // formation of aarch64_enc_ldaxr, using rscratch2 for the address
  // (rscratch1 receives the stlxr status: 0 on success).  The final
  // cmpw converts that status into condition flags for the caller.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Set flags from the store-exclusive status word (EQ on success).
    __ cmpw(rscratch1, zr);
  %}
2869 
  // Compare-and-exchange encodings (plain, non-acquiring forms) for
  // 64-bit, 32-bit, 16-bit and 8-bit operand sizes.  The memory
  // operand must be a bare base register (no index/displacement);
  // ordering is release-only here (acquire=false, release=true).

  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2901 
2902 
2903   // The only difference between aarch64_enc_cmpxchg and
2904   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
2905   // CompareAndSwap sequence to serve as a barrier on acquiring a
2906   // lock.
  // Acquiring variants: identical to the group above except
  // acquire=true, so the CAS also acts as an acquire barrier
  // (e.g. when taking a lock).

  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2938 
2939   // auxiliary used for CompareAndSwapX to set result register
  // Materialize the EQ condition flag (set by a preceding CAS) as
  // 0/1 in the result register.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
2945 
2946   // prefetch encodings
2947 
  // Prefetch for store: emits prfm PSTL1KEEP (prefetch-for-store into
  // L1, temporal) at the resolved address; a base+disp+index form
  // needs an intermediate lea into rscratch1.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
2966 
  // mov encodings
2968 
  // Move a 32-bit immediate into an int register; a zero constant is
  // emitted as a move from zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a long register; zero uses zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Move a pointer constant.  NULL and (address)1 are handled by the
  // dedicated mov_p0 / mov_p1 encodings below, so they must not reach
  // here.  Oop and metadata constants go through the relocatable
  // movoop / mov_metadata paths; plain addresses below the VM page
  // size are emitted as immediates, larger ones via adrp+add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Move the null pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Load the address of the polling page via a page-aligned adrp with
  // a poll_type relocation; the page-offset part must be zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Move a narrow (compressed) oop constant; must carry an oop
  // relocation and must not be null.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Move the narrow-oop null constant.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant; must carry a metadata
  // relocation and must not be null.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3073 
3074   // arithmetic encodings
3075 
  // 32-bit add/subtract of an immediate.  $primary selects the
  // operation (0 = add, 1 = subtract); the immediate is negated for
  // subtract and the sign of the result chooses addw vs subw so the
  // encoded immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit counterpart of the encoding above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // Division and remainder go through corrected_idiv{l,q}, which
  // applies the Java-mandated corrections (e.g. MIN_VALUE / -1);
  // the boolean argument selects remainder (true) vs quotient (false).

  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3135 
3136   // compare instruction encodings
3137 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare with an add/sub-encodable immediate; a negative
  // immediate is compared via addsw with its negation.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare with an arbitrary immediate, materialized in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare with a 12-bit add/sub immediate.  val == -val only
  // for Long.MIN_VALUE, which cannot be negated and so is materialized
  // in rscratch1 instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare with an arbitrary immediate, via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full-width).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3219 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; same emission, the cmpOpU operand
  // supplies unsigned condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Slow-path subtype check.  On a miss, $primary selects whether the
  // result register is zeroed (flags are set either way via
  // set_cond_codes).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3255 
  // Static Java call.  A null _method means a call to a runtime
  // wrapper; otherwise a (possibly optimized-virtual) trampoline call
  // is emitted together with its to-interpreter stub.  Any failure to
  // allocate code space records a bailout and returns early.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}

  // Dynamic (inline-cache) Java call.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call epilogue; stack-depth verification is unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
      __ call_Unimplemented();
    }
  %}

  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: push a {zr, return-pc} pair
      // so the stack walker can find the last Java pc, then blr.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // Pop the breadcrumb pair.
      __ add(sp, sp, 2 * wordSize);
    }
  %}

  // Jump to the rethrow stub (exception in r0 per convention).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target method.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3356 
3357   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3358     MacroAssembler _masm(&cbuf);
3359     Register oop = as_Register($object$$reg);
3360     Register box = as_Register($box$$reg);
3361     Register disp_hdr = as_Register($tmp$$reg);
3362     Register tmp = as_Register($tmp2$$reg);
3363     Label cont;
3364     Label object_has_monitor;
3365     Label cas_failed;
3366 
3367     assert_different_registers(oop, box, tmp, disp_hdr);
3368 
3369     // Load markOop from object into displaced_header.
3370     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
3371 
3372     // Always do locking in runtime.
3373     if (EmitSync & 0x01) {
3374       __ cmp(oop, zr);
3375       return;
3376     }
3377 
3378     if (UseBiasedLocking && !UseOptoBiasInlining) {
3379       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
3380     }
3381 
3382     // Check for existing monitor
3383     if ((EmitSync & 0x02) == 0) {
3384       __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
3385     }
3386 
3387     // Set tmp to be (markOop of object | UNLOCK_VALUE).
3388     __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
3389 
3390     // Initialize the box. (Must happen before we update the object mark!)
3391     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3392 
3393     // Compare object markOop with an unlocked value (tmp) and if
3394     // equal exchange the stack address of our box with object markOop.
3395     // On failure disp_hdr contains the possibly locked markOop.
3396     __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
3397                /*release*/ true, /*weak*/ false, disp_hdr);
3398     __ br(Assembler::EQ, cont);
3399 
3400     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3401 
3402     // If the compare-and-exchange succeeded, then we found an unlocked
3403     // object, will have now locked it will continue at label cont
3404 
3405     __ bind(cas_failed);
3406     // We did not see an unlocked object so try the fast recursive case.
3407 
3408     // Check if the owner is self by comparing the value in the
3409     // markOop of object (disp_hdr) with the stack pointer.
3410     __ mov(rscratch1, sp);
3411     __ sub(disp_hdr, disp_hdr, rscratch1);
3412     __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
3413     // If condition is true we are cont and hence we can store 0 as the
3414     // displaced header in the box, which indicates that it is a recursive lock.
3415     __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
3416     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3417 
3418     if ((EmitSync & 0x02) == 0) {
3419       __ b(cont);
3420 
3421       // Handle existing monitor.
3422       __ bind(object_has_monitor);
3423       // The object's monitor m is unlocked iff m->owner == NULL,
3424       // otherwise m->owner may contain a thread or a stack address.
3425       //
3426       // Try to CAS m->owner from NULL to current thread.
3427       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
3428     __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
3429                /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
3430 
3431       // Store a non-null value into the box to avoid looking like a re-entrant
3432       // lock. The fast-path monitor unlock code checks for
3433       // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
3434       // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
3435       __ mov(tmp, (address)markOopDesc::unused_mark());
3436       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3437     }
3438 
3439     __ bind(cont);
3440     // flag == EQ indicates success
3441     // flag == NE indicates failure
3442   %}
3443 
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit. On exit the condition flags encode the
    // result: EQ => unlocked, NE => fall back to the runtime.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr); // Sets flags for result
      __ br(Assembler::NE, cont);

      // Owner is self and there are no recursions: hand off the monitor
      // unless another thread is queued on it.
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr); // Sets flags for result
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(zr, tmp); // set unowned
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3513 
3514 %}
3515 
3516 //----------FRAME--------------------------------------------------------------
3517 // Definition of frame structure and management information.
3518 //
3519 //  S T A C K   L A Y O U T    Allocators stack-slot number
3520 //                             |   (to get allocators register number
3521 //  G  Owned by    |        |  v    add OptoReg::stack0())
3522 //  r   CALLER     |        |
3523 //  o     |        +--------+      pad to even-align allocators stack-slot
3524 //  w     V        |  pad0  |        numbers; owned by CALLER
3525 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3526 //  h     ^        |   in   |  5
3527 //        |        |  args  |  4   Holes in incoming args owned by SELF
3528 //  |     |        |        |  3
3529 //  |     |        +--------+
3530 //  V     |        | old out|      Empty on Intel, window on Sparc
3531 //        |    old |preserve|      Must be even aligned.
3532 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3533 //        |        |   in   |  3   area for Intel ret address
3534 //     Owned by    |preserve|      Empty on Sparc.
3535 //       SELF      +--------+
3536 //        |        |  pad2  |  2   pad to align old SP
3537 //        |        +--------+  1
3538 //        |        | locks  |  0
3539 //        |        +--------+----> OptoReg::stack0(), even aligned
3540 //        |        |  pad1  | 11   pad to align new SP
3541 //        |        +--------+
3542 //        |        |        | 10
3543 //        |        | spills |  9   spills
3544 //        V        |        |  8   (pad0 slot for callee)
3545 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3546 //        ^        |  out   |  7
3547 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3548 //     Owned by    +--------+
3549 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3550 //        |    new |preserve|      Must be even-aligned.
3551 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3552 //        |        |        |
3553 //
3554 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3555 //         known from SELF's arguments and the Java calling convention.
3556 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3564 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3565 //         even aligned with pad0 as needed.
3566 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3567 //           (the latter is true on Intel but is it false on AArch64?)
3568 //         region 6-11 is even aligned; it may be padded out more so that
3569 //         the region from SP to FP meets the minimum stack alignment.
3570 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3571 //         alignment.  Region 11, pad1, may be dynamically extended so that
3572 //         SP meets the minimum alignment.
3573 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo[] gives the register holding the (low half of the) return value
    // for each ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // hi[] gives the second (high) half; OptoReg::Bad where the value
    // occupies a single allocator slot.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3677 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
// Default attribute values; individual operand/instruct definitions
// may override them.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3695 
3696 //----------OPERANDS-----------------------------------------------------------
3697 // Operand definitions must precede instruction definitions for correct parsing
3698 // in the ADLC because operands constitute user defined types which are used in
3699 // instruction definitions.
3700 
3701 //----------Simple Operands----------------------------------------------------
3702 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (may be negative)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3807 
// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit mask (0xFF) as a 32 bit constant
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16 bit mask (0xFFFF) as a 32 bit constant
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit mask (0xFF) as a 64 bit constant
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16 bit mask (0xFFFF) as a 64 bit constant
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask (0xFFFFFFFF) as a 64 bit constant
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3907 
// 64 bit mask of contiguous low-order ones (2^k - 1, non-zero, with the
// top two bits clear)
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order ones (2^k - 1, non-zero, with the
// top two bits clear)
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3985 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 4 byte (size 2^2) scaled or unscaled accesses
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 8 byte (size 2^3) scaled or unscaled accesses
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 16 byte (size 2^4) scaled or unscaled accesses
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 4 byte (size 2^2) scaled or unscaled accesses
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 8 byte (size 2^3) scaled or unscaled accesses
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 16 byte (size 2^4) scaled or unscaled accesses
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4066 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4175 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4257 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate encodable as an FMOV (immediate) operand
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate encodable as an FMOV (immediate) operand
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4349 
4350 // Integer 32 bit Register Operands
4351 // Integer 32 bitRegister (excludes SP)
4352 operand iRegI()
4353 %{
4354   constraint(ALLOC_IN_RC(any_reg32));
4355   match(RegI);
4356   match(iRegINoSp);
4357   op_cost(0);
4358   format %{ %}
4359   interface(REG_INTER);
4360 %}
4361 
4362 // Integer 32 bit Register not Special
4363 operand iRegINoSp()
4364 %{
4365   constraint(ALLOC_IN_RC(no_special_reg32));
4366   match(RegI);
4367   op_cost(0);
4368   format %{ %}
4369   interface(REG_INTER);
4370 %}
4371 
4372 // Integer 64 bit Register Operands
4373 // Integer 64 bit Register (includes SP)
4374 operand iRegL()
4375 %{
4376   constraint(ALLOC_IN_RC(any_reg));
4377   match(RegL);
4378   match(iRegLNoSp);
4379   op_cost(0);
4380   format %{ %}
4381   interface(REG_INTER);
4382 %}
4383 
4384 // Integer 64 bit Register not Special
4385 operand iRegLNoSp()
4386 %{
4387   constraint(ALLOC_IN_RC(no_special_reg));
4388   match(RegL);
4389   match(iRegL_R0);
4390   format %{ %}
4391   interface(REG_INTER);
4392 %}
4393 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  // more specific pointer operands also match here
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4426 
// Fixed-register pointer operands: each one constrains allocation to a
// single named register, for rules that must use a specific register
// (e.g. calling-convention or runtime-stub arguments).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4510 
// Fixed-register long operands (allocation pinned to one register).

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4554 
// Pointer 64 bit Register FP only (the frame pointer, r29)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4565 
// Fixed-register 32-bit integer operands (allocation pinned to one register).

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4610 
4611 
// Narrow Pointer Register Operands
// Narrow Pointer Register (holds a compressed oop)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4660 
// heap base register -- used for encoding immN0
// (the register holding the compressed-oop heap base)

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4671 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4715 
// Fixed-register double operands: allocation pinned to a single FP/SIMD
// register (v0..v3), for rules that require specific registers.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4751 
4752 // Flags register, used as output of signed compare instructions
4753 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
4756 // that ordered inequality tests use GT, GE, LT or LE none of which
4757 // pass through cases where the result is unordered i.e. one or both
4758 // inputs to the compare is a NaN. this means that the ideal code can
4759 // replace e.g. a GT with an LE and not end up capturing the NaN case
4760 // (where the comparison should always fail). EQ and NE tests are
4761 // always generated in ideal code so that unordered folds into the NE
4762 // case, matching the behaviour of AArch64 NE.
4763 //
4764 // This differs from x86 where the outputs of FP compares use a
4765 // special FP flags registers and where compares based on this
4766 // register are distinguished into ordered inequalities (cmpOpUCF) and
4767 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
4768 // to explicitly handle the unordered case in branches. x86 also has
4769 // to include extra CMoveX rules to accept a cmpOpUCF input.
4770 
// Flags register operand for signed (and FP) compares.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4791 
// Special Registers

// Method Register (register used to pass the inline-cache holder)
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register used to pass the interpreter's method oop
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register (the register dedicated to the current JavaThread)
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register (LR)
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4833 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER interfaces below, index(0xffffffff) is the ADLC
// sentinel meaning "no index register".

// [reg] -- simple register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, ireg sxtw(scale)] -- base plus sign-extended, scaled 32-bit index.
// The predicate checks the scaled offset fits the addressing mode for
// every memory use of this address.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, lreg lsl(scale)] -- base plus scaled 64-bit index
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, ireg sxtw] -- base plus sign-extended 32-bit index, no scaling
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, lreg] -- base plus 64-bit index, no scaling
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
4907 
// Base-plus-immediate-offset addressing modes. The I/L suffix selects the
// type of the offset node (int vs long constant); the numeric suffix
// (4/8/16) indicates the access size whose scaled-offset encoding the
// immediate operand type is restricted to.

// [reg, #off] with a 32-bit immediate offset
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg, #off] with a 64-bit immediate offset
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5019 
// Narrow-oop variants of the memory operands above: the base is a
// compressed oop being decoded (DecodeN). All are guarded by
// narrow_oop_shift() == 0, i.e. the decode is a no-op (zero shift), so
// the narrow register can be used directly as the base address.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5124 
5125 
5126 
// AArch64 opto stubs need to write to the pc slot in the thread anchor:
// [thread, #off] where off is the anchor-pc offset constant
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5141 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP -- NOTE(review): x86-style name; encodes the stack pointer here
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5216 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  // hex values are the AArch64 condition-code encodings
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5253 
// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // unsigned condition codes: lo/hs/ls/hi instead of lt/ge/le/gt
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5272 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  // only ==/!= tests match, so the rule can emit cbz/cbnz/tbz/tbnz
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5296 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  // only </>= tests match (sign-bit tests, convertible to tbz/tbnz)
  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5321 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  // ==, !=, < and >= tests against zero are all expressible as cbz/cbnz
  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5348 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER);  // terminating ';' added for consistency with every other operand
%}
5361 
// vector memory opclasses: addressing modes usable by 4-, 8- and 16-byte
// vector loads/stores (offset forms restricted to the matching access size)
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5365 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5393 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map named A53-style stages onto the generic S0..S5 stages declared below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5403 
5404 // Integer ALU reg operation
5405 pipeline %{
5406 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5419 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 / ALU are composite resources: an instruction needing INS01 can
// issue on either issue port, one needing ALU on either ALU.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5434 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5440 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP two-source single-precision op: reads in S1/S2, writes in S5
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-source double-precision op
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP single-source single-precision op
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP single-source double-precision op
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5500 
// FP <-> integer conversion pipe classes: all single-instruction,
// source read in S1, destination written in S5.

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
// NOTE(review): src is declared iRegIorL2I although an l2d source is a
// long -- matches the surrounding file; pipe-class params affect latency
// modelling only. Confirm against upstream before changing.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5572 
// FP divide, single precision -- issues on INS0 only
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision -- issues on INS0 only
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags register
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
5614 
// FP move-immediate (no source operands), single precision
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate (no source operands), double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load (e.g. from the constant table), single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load (e.g. from the constant table), double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
5646 
// Vector integer multiply, 64-bit vectors
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector integer multiply, 128-bit vectors (issue slot 0 only)
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit vectors.
// dst appears twice: read early (S1) as the accumulator, written at S5.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit vectors (issue slot 0 only).
// dst appears twice: read early (S1) as the accumulator, written at S5.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dual-op (e.g. simple arithmetic), 64-bit vectors
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dual-op, 128-bit vectors (issue slot 0 only)
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}
5708 
// Vector logical operation, 64-bit vectors
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical operation, 128-bit vectors (issue slot 0 only)
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit vectors
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit vectors (issue slot 0 only)
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit vectors.
// The immediate shift operand needs no pipeline resource, so it has no entry.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit vectors (issue slot 0 only)
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
5766 
// Vector FP dual-op (e.g. add/sub), 64-bit vectors
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dual-op, 128-bit vectors (issue slot 0 only)
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit vectors (issue slot 0 only)
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit vectors (issue slot 0 only)
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit vectors.
// NOTE(review): no 64-bit counterpart is declared in this section.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary operation (single source), 64-bit vectors
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary operation, 128-bit vectors (issue slot 0 only)
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
5833 
// Duplicate a general-purpose register into all lanes, 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general-purpose register into all lanes, 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a single-precision FP register into all lanes, 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a single-precision FP register into all lanes, 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double-precision FP register into all lanes, 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 64-bit vector
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 128-bit vector (issue slot 0 only)
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
5894 
// Vector load, 64-bit vector; address consumed at issue
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit vector; address consumed at issue
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit vector; address consumed at issue
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
5921 
// Vector store, 128-bit vector; address consumed at issue.
// Fixed: src was declared vecD — every other 128-bit class in this file
// (vload_reg_mem128, vshift128, ...) uses vecX for 128-bit operands.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
5930 
5931 //------- Integer ALU operations --------------------------
5932 
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): the header and dst say EX2, but the ALU resource is only
// held through EX1 — confirm which is intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6028 
6029 //------- Compare operation -------------------------------
6030 
// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6055 
6056 //------- Conditional instructions ------------------------
6057 
// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6093 
6094 //------- Multiply pipeline operations --------------------
6095 
// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6146 
6147 //------- Divide pipeline operations --------------------
6148 
// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6172 
6173 //------- Load pipeline operations ------------------------
6174 
// Load - prefetch (no destination register)
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-offset addressing)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6206 
6207 //------- Store pipeline operations -----------------------
6208 
// Store - zr, mem (store of the zero register; no data operand)
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-offset addressing; dst is the address base)
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6240 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads the flags register)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6269 
6270 //------- Synchronisation operations ----------------------
6271 
// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6293 
// Empty pipeline class (used for nops; see the define block below)
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
6328 
// Define the class for the Nop node: nops consume no pipeline resources.
define %{
   MachNop = pipe_class_empty;
%}
6333 
6334 %}
6335 //----------INSTRUCTIONS-------------------------------------------------------
6336 //
6337 // match      -- States which machine-independent subtree may be replaced
6338 //               by this instruction.
6339 // ins_cost   -- The estimated cost of this instruction is used by instruction
6340 //               selection to identify a minimum cost tree of machine
6341 //               instructions that matches a tree of machine-independent
6342 //               instructions.
6343 // format     -- A string providing the disassembly for this instruction.
6344 //               The value of an instruction's operand may be inserted
6345 //               by referring to it with a '$' prefix.
6346 // opcode     -- Three instruction opcodes may be provided.  These are referred
6347 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6349 //               indicate the type of machine instruction, while secondary
6350 //               and tertiary are often used for prefix options or addressing
6351 //               modes.
6352 // ins_encode -- A list of encode classes with parameters. The encode class
6353 //               name must have been defined in an 'enc_class' specification
6354 //               in the encode section of the architecture description.
6355 
6356 // ============================================================================
6357 // Memory (Load/Store) Instructions
6358 
6359 // Load Instructions
6360 
// Load Byte (8 bit signed)
// The predicate rejects loads needing acquire semantics; those are
// presumably matched by separate rules outside this chunk.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// Matches the fused load+sign-extend; the predicate tests the load node,
// which is input 1 of the ConvI2L.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
// ldrb zero-extends, so the widening conversion is free.
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6416 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
// ldrh zero-extends, so the widening conversion is free.
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6472 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches (AndL (ConvI2L (LoadI ...)) 0xFFFFFFFF): ldrw zero-extends,
// so the mask is free.  The load is two levels down from the And node.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6514 
// Load Long (64 bit signed)
// Fixed: the disassembly annotation said "# int" for a 64-bit load.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6528 
// Load Range (array length; no acquire predicate needed)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6597 
// Load Float
// NOTE(review): FP loads use the generic pipe_class_memory rather than
// iload_reg_mem used by the integer loads above — confirm intended.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6625 
6626 
// Load Int Constant (materialize a 32-bit immediate)
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant (materialize a 64-bit immediate)
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
6652 
// Load Pointer Constant
// Cost reflects the multi-instruction mov sequence for a full pointer.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
6682 
// Load Pointer Constant One
// Fixed: the disassembly annotation said "# NULL ptr" (copy-paste from
// loadConP0) although this rule materializes the constant one.

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6696 
// Load Poll Page Constant (pc-relative adr of the safepoint polling page)

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card table base)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
6724 
// Load Narrow Pointer Constant (compressed oop immediate)

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant (compressed klass pointer immediate)

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
6766 
// Load Packed Float Constant
// "Packed" means the value is encodable as an FMOV immediate, so no
// constant-table load is needed.  fmovs takes a double argument, hence
// the cast.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case: load from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
6797 
// Load Packed Double Constant (value encodable as an FMOV immediate)

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
6810 
// Load Double Constant (general case: load from the constant table)
// Fixed: the format annotation said "float=$con" (copy-paste from loadConF).

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6827 
// Store Instructions

// Store CMS card-mark Immediate
// Only matches when the StoreStore barrier can be elided (predicate).
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
6860 
// Store Byte
// The predicate rejects stores needing release semantics; those are
// presumably matched by separate rules outside this chunk.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
6874 
6875 
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // The encoding (aarch64_enc_strb0) stores the zero register, so the
  // format reflects that; the old text named a misspelled scratch register.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6888 
6889 // Store Char/Short
6890 instruct storeC(iRegIorL2I src, memory mem)
6891 %{
6892   match(Set mem (StoreC mem src));
6893   predicate(!needs_releasing_store(n));
6894 
6895   ins_cost(INSN_COST);
6896   format %{ "strh  $src, $mem\t# short" %}
6897 
6898   ins_encode(aarch64_enc_strh(src, mem));
6899 
6900   ins_pipe(istore_reg_mem);
6901 %}
6902 
6903 instruct storeimmC0(immI0 zero, memory mem)
6904 %{
6905   match(Set mem (StoreC mem zero));
6906   predicate(!needs_releasing_store(n));
6907 
6908   ins_cost(INSN_COST);
6909   format %{ "strh  zr, $mem\t# short" %}
6910 
6911   ins_encode(aarch64_enc_strh0(mem));
6912 
6913   ins_pipe(istore_mem);
6914 %}
6915 
6916 // Store Integer
6917 
6918 instruct storeI(iRegIorL2I src, memory mem)
6919 %{
6920   match(Set mem(StoreI mem src));
6921   predicate(!needs_releasing_store(n));
6922 
6923   ins_cost(INSN_COST);
6924   format %{ "strw  $src, $mem\t# int" %}
6925 
6926   ins_encode(aarch64_enc_strw(src, mem));
6927 
6928   ins_pipe(istore_reg_mem);
6929 %}
6930 
6931 instruct storeimmI0(immI0 zero, memory mem)
6932 %{
6933   match(Set mem(StoreI mem zero));
6934   predicate(!needs_releasing_store(n));
6935 
6936   ins_cost(INSN_COST);
6937   format %{ "strw  zr, $mem\t# int" %}
6938 
6939   ins_encode(aarch64_enc_strw0(mem));
6940 
6941   ins_pipe(istore_mem);
6942 %}
6943 
6944 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // 64-bit store; format comment corrected from "# int" to "# long".
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
6957 
// Store Long zero (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // 64-bit zero store via zr; format comment corrected from "# int".
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
6971 
6972 // Store Pointer
6973 instruct storeP(iRegP src, memory mem)
6974 %{
6975   match(Set mem (StoreP mem src));
6976   predicate(!needs_releasing_store(n));
6977 
6978   ins_cost(INSN_COST);
6979   format %{ "str  $src, $mem\t# ptr" %}
6980 
6981   ins_encode(aarch64_enc_str(src, mem));
6982 
6983   ins_pipe(istore_reg_mem);
6984 %}
6985 
6986 // Store Pointer
6987 instruct storeimmP0(immP0 zero, memory mem)
6988 %{
6989   match(Set mem (StoreP mem zero));
6990   predicate(!needs_releasing_store(n));
6991 
6992   ins_cost(INSN_COST);
6993   format %{ "str zr, $mem\t# ptr" %}
6994 
6995   ins_encode(aarch64_enc_str0(mem));
6996 
6997   ins_pipe(istore_mem);
6998 %}
6999 
7000 // Store Compressed Pointer
7001 instruct storeN(iRegN src, memory mem)
7002 %{
7003   match(Set mem (StoreN mem src));
7004   predicate(!needs_releasing_store(n));
7005 
7006   ins_cost(INSN_COST);
7007   format %{ "strw  $src, $mem\t# compressed ptr" %}
7008 
7009   ins_encode(aarch64_enc_strw(src, mem));
7010 
7011   ins_pipe(istore_reg_mem);
7012 %}
7013 
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  // rheapbase contains zero when both the narrow oop base and narrow klass
  // base are NULL, so the heapbase register can be stored directly as the
  // compressed null -- saving a zero materialization.
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7028 
7029 // Store Float
7030 instruct storeF(vRegF src, memory mem)
7031 %{
7032   match(Set mem (StoreF mem src));
7033   predicate(!needs_releasing_store(n));
7034 
7035   ins_cost(INSN_COST);
7036   format %{ "strs  $src, $mem\t# float" %}
7037 
7038   ins_encode( aarch64_enc_strs(src, mem) );
7039 
7040   ins_pipe(pipe_class_memory);
7041 %}
7042 
7043 // TODO
7044 // implement storeImmF0 and storeFImmPacked
7045 
7046 // Store Double
7047 instruct storeD(vRegD src, memory mem)
7048 %{
7049   match(Set mem (StoreD mem src));
7050   predicate(!needs_releasing_store(n));
7051 
7052   ins_cost(INSN_COST);
7053   format %{ "strd  $src, $mem\t# double" %}
7054 
7055   ins_encode( aarch64_enc_strd(src, mem) );
7056 
7057   ins_pipe(pipe_class_memory);
7058 %}
7059 
7060 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // match before predicate, consistent with every other store rule in this
  // section (clause order is not semantically significant to ADLC).
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7073 
7074 // TODO
7075 // implement storeImmD0 and storeDImmPacked
7076 
7077 // prefetch instructions
7078 // Must be safe to execute with invalid address (cannot fault).
7079 
7080 instruct prefetchalloc( memory mem ) %{
7081   match(PrefetchAllocation mem);
7082 
7083   ins_cost(INSN_COST);
7084   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7085 
7086   ins_encode( aarch64_enc_prefetchw(mem) );
7087 
7088   ins_pipe(iload_prefetch);
7089 %}
7090 
7091 //  ---------------- volatile loads and stores ----------------
7092 
7093 // Load Byte (8 bit signed)
7094 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7095 %{
7096   match(Set dst (LoadB mem));
7097 
7098   ins_cost(VOLATILE_REF_COST);
7099   format %{ "ldarsb  $dst, $mem\t# byte" %}
7100 
7101   ins_encode(aarch64_enc_ldarsb(dst, mem));
7102 
7103   ins_pipe(pipe_serial);
7104 %}
7105 
7106 // Load Byte (8 bit signed) into long
7107 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7108 %{
7109   match(Set dst (ConvI2L (LoadB mem)));
7110 
7111   ins_cost(VOLATILE_REF_COST);
7112   format %{ "ldarsb  $dst, $mem\t# byte" %}
7113 
7114   ins_encode(aarch64_enc_ldarsb(dst, mem));
7115 
7116   ins_pipe(pipe_serial);
7117 %}
7118 
7119 // Load Byte (8 bit unsigned)
7120 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7121 %{
7122   match(Set dst (LoadUB mem));
7123 
7124   ins_cost(VOLATILE_REF_COST);
7125   format %{ "ldarb  $dst, $mem\t# byte" %}
7126 
7127   ins_encode(aarch64_enc_ldarb(dst, mem));
7128 
7129   ins_pipe(pipe_serial);
7130 %}
7131 
7132 // Load Byte (8 bit unsigned) into long
7133 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7134 %{
7135   match(Set dst (ConvI2L (LoadUB mem)));
7136 
7137   ins_cost(VOLATILE_REF_COST);
7138   format %{ "ldarb  $dst, $mem\t# byte" %}
7139 
7140   ins_encode(aarch64_enc_ldarb(dst, mem));
7141 
7142   ins_pipe(pipe_serial);
7143 %}
7144 
7145 // Load Short (16 bit signed)
7146 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7147 %{
7148   match(Set dst (LoadS mem));
7149 
7150   ins_cost(VOLATILE_REF_COST);
7151   format %{ "ldarshw  $dst, $mem\t# short" %}
7152 
7153   ins_encode(aarch64_enc_ldarshw(dst, mem));
7154 
7155   ins_pipe(pipe_serial);
7156 %}
7157 
// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
7169 
7170 // Load Short/Char (16 bit unsigned) into long
7171 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7172 %{
7173   match(Set dst (ConvI2L (LoadUS mem)));
7174 
7175   ins_cost(VOLATILE_REF_COST);
7176   format %{ "ldarh  $dst, $mem\t# short" %}
7177 
7178   ins_encode(aarch64_enc_ldarh(dst, mem));
7179 
7180   ins_pipe(pipe_serial);
7181 %}
7182 
7183 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Sign-extending halfword acquire load; format corrected to match the
  // aarch64_enc_ldarsh encoding (was "ldarh", the unsigned form).
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7195 
7196 // Load Integer (32 bit signed)
7197 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7198 %{
7199   match(Set dst (LoadI mem));
7200 
7201   ins_cost(VOLATILE_REF_COST);
7202   format %{ "ldarw  $dst, $mem\t# int" %}
7203 
7204   ins_encode(aarch64_enc_ldarw(dst, mem));
7205 
7206   ins_pipe(pipe_serial);
7207 %}
7208 
7209 // Load Integer (32 bit unsigned) into long
7210 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7211 %{
7212   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7213 
7214   ins_cost(VOLATILE_REF_COST);
7215   format %{ "ldarw  $dst, $mem\t# int" %}
7216 
7217   ins_encode(aarch64_enc_ldarw(dst, mem));
7218 
7219   ins_pipe(pipe_serial);
7220 %}
7221 
7222 // Load Long (64 bit signed)
7223 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7224 %{
7225   match(Set dst (LoadL mem));
7226 
7227   ins_cost(VOLATILE_REF_COST);
7228   format %{ "ldar  $dst, $mem\t# int" %}
7229 
7230   ins_encode(aarch64_enc_ldar(dst, mem));
7231 
7232   ins_pipe(pipe_serial);
7233 %}
7234 
7235 // Load Pointer
7236 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7237 %{
7238   match(Set dst (LoadP mem));
7239 
7240   ins_cost(VOLATILE_REF_COST);
7241   format %{ "ldar  $dst, $mem\t# ptr" %}
7242 
7243   ins_encode(aarch64_enc_ldar(dst, mem));
7244 
7245   ins_pipe(pipe_serial);
7246 %}
7247 
7248 // Load Compressed Pointer
7249 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7250 %{
7251   match(Set dst (LoadN mem));
7252 
7253   ins_cost(VOLATILE_REF_COST);
7254   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7255 
7256   ins_encode(aarch64_enc_ldarw(dst, mem));
7257 
7258   ins_pipe(pipe_serial);
7259 %}
7260 
7261 // Load Float
7262 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7263 %{
7264   match(Set dst (LoadF mem));
7265 
7266   ins_cost(VOLATILE_REF_COST);
7267   format %{ "ldars  $dst, $mem\t# float" %}
7268 
7269   ins_encode( aarch64_enc_fldars(dst, mem) );
7270 
7271   ins_pipe(pipe_serial);
7272 %}
7273 
7274 // Load Double
7275 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7276 %{
7277   match(Set dst (LoadD mem));
7278 
7279   ins_cost(VOLATILE_REF_COST);
7280   format %{ "ldard  $dst, $mem\t# double" %}
7281 
7282   ins_encode( aarch64_enc_fldard(dst, mem) );
7283 
7284   ins_pipe(pipe_serial);
7285 %}
7286 
7287 // Store Byte
7288 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7289 %{
7290   match(Set mem (StoreB mem src));
7291 
7292   ins_cost(VOLATILE_REF_COST);
7293   format %{ "stlrb  $src, $mem\t# byte" %}
7294 
7295   ins_encode(aarch64_enc_stlrb(src, mem));
7296 
7297   ins_pipe(pipe_class_memory);
7298 %}
7299 
7300 // Store Char/Short
7301 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7302 %{
7303   match(Set mem (StoreC mem src));
7304 
7305   ins_cost(VOLATILE_REF_COST);
7306   format %{ "stlrh  $src, $mem\t# short" %}
7307 
7308   ins_encode(aarch64_enc_stlrh(src, mem));
7309 
7310   ins_pipe(pipe_class_memory);
7311 %}
7312 
7313 // Store Integer
7314 
7315 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7316 %{
7317   match(Set mem(StoreI mem src));
7318 
7319   ins_cost(VOLATILE_REF_COST);
7320   format %{ "stlrw  $src, $mem\t# int" %}
7321 
7322   ins_encode(aarch64_enc_stlrw(src, mem));
7323 
7324   ins_pipe(pipe_class_memory);
7325 %}
7326 
7327 // Store Long (64 bit signed)
7328 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
7329 %{
7330   match(Set mem (StoreL mem src));
7331 
7332   ins_cost(VOLATILE_REF_COST);
7333   format %{ "stlr  $src, $mem\t# int" %}
7334 
7335   ins_encode(aarch64_enc_stlr(src, mem));
7336 
7337   ins_pipe(pipe_class_memory);
7338 %}
7339 
7340 // Store Pointer
7341 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7342 %{
7343   match(Set mem (StoreP mem src));
7344 
7345   ins_cost(VOLATILE_REF_COST);
7346   format %{ "stlr  $src, $mem\t# ptr" %}
7347 
7348   ins_encode(aarch64_enc_stlr(src, mem));
7349 
7350   ins_pipe(pipe_class_memory);
7351 %}
7352 
7353 // Store Compressed Pointer
7354 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7355 %{
7356   match(Set mem (StoreN mem src));
7357 
7358   ins_cost(VOLATILE_REF_COST);
7359   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7360 
7361   ins_encode(aarch64_enc_stlrw(src, mem));
7362 
7363   ins_pipe(pipe_class_memory);
7364 %}
7365 
7366 // Store Float
7367 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7368 %{
7369   match(Set mem (StoreF mem src));
7370 
7371   ins_cost(VOLATILE_REF_COST);
7372   format %{ "stlrs  $src, $mem\t# float" %}
7373 
7374   ins_encode( aarch64_enc_fstlrs(src, mem) );
7375 
7376   ins_pipe(pipe_class_memory);
7377 %}
7378 
7379 // TODO
7380 // implement storeImmF0 and storeFImmPacked
7381 
7382 // Store Double
7383 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7384 %{
7385   match(Set mem (StoreD mem src));
7386 
7387   ins_cost(VOLATILE_REF_COST);
7388   format %{ "stlrd  $src, $mem\t# double" %}
7389 
7390   ins_encode( aarch64_enc_fstlrd(src, mem) );
7391 
7392   ins_pipe(pipe_class_memory);
7393 %}
7394 
7395 //  ---------------- end of volatile loads and stores ----------------
7396 
7397 // ============================================================================
7398 // BSWAP Instructions
7399 
7400 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7401   match(Set dst (ReverseBytesI src));
7402 
7403   ins_cost(INSN_COST);
7404   format %{ "revw  $dst, $src" %}
7405 
7406   ins_encode %{
7407     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7408   %}
7409 
7410   ins_pipe(ialu_reg);
7411 %}
7412 
7413 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7414   match(Set dst (ReverseBytesL src));
7415 
7416   ins_cost(INSN_COST);
7417   format %{ "rev  $dst, $src" %}
7418 
7419   ins_encode %{
7420     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7421   %}
7422 
7423   ins_pipe(ialu_reg);
7424 %}
7425 
7426 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7427   match(Set dst (ReverseBytesUS src));
7428 
7429   ins_cost(INSN_COST);
7430   format %{ "rev16w  $dst, $src" %}
7431 
7432   ins_encode %{
7433     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7434   %}
7435 
7436   ins_pipe(ialu_reg);
7437 %}
7438 
7439 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7440   match(Set dst (ReverseBytesS src));
7441 
7442   ins_cost(INSN_COST);
7443   format %{ "rev16w  $dst, $src\n\t"
7444             "sbfmw $dst, $dst, #0, #15" %}
7445 
7446   ins_encode %{
7447     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7448     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7449   %}
7450 
7451   ins_pipe(ialu_reg);
7452 %}
7453 
7454 // ============================================================================
7455 // Zero Count Instructions
7456 
7457 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7458   match(Set dst (CountLeadingZerosI src));
7459 
7460   ins_cost(INSN_COST);
7461   format %{ "clzw  $dst, $src" %}
7462   ins_encode %{
7463     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7464   %}
7465 
7466   ins_pipe(ialu_reg);
7467 %}
7468 
7469 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7470   match(Set dst (CountLeadingZerosL src));
7471 
7472   ins_cost(INSN_COST);
7473   format %{ "clz   $dst, $src" %}
7474   ins_encode %{
7475     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7476   %}
7477 
7478   ins_pipe(ialu_reg);
7479 %}
7480 
7481 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7482   match(Set dst (CountTrailingZerosI src));
7483 
7484   ins_cost(INSN_COST * 2);
7485   format %{ "rbitw  $dst, $src\n\t"
7486             "clzw   $dst, $dst" %}
7487   ins_encode %{
7488     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7489     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7490   %}
7491 
7492   ins_pipe(ialu_reg);
7493 %}
7494 
7495 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7496   match(Set dst (CountTrailingZerosL src));
7497 
7498   ins_cost(INSN_COST * 2);
7499   format %{ "rbit   $dst, $src\n\t"
7500             "clz    $dst, $dst" %}
7501   ins_encode %{
7502     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7503     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7504   %}
7505 
7506   ins_pipe(ialu_reg);
7507 %}
7508 
7509 //---------- Population Count Instructions -------------------------------------
7510 //
7511 
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this movw writes $src (clearing its upper 32 bits) but
    // $src is not declared as modified in effect().  Only the high half --
    // not part of the int value -- changes; confirm the allocator is
    // guaranteed not to rely on those bits.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    // addv sums the per-byte popcounts into a single lane.
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7533 
7534 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
7535   predicate(UsePopCountInstruction);
7536   match(Set dst (PopCountI (LoadI mem)));
7537   effect(TEMP tmp);
7538   ins_cost(INSN_COST * 13);
7539 
7540   format %{ "ldrs   $tmp, $mem\n\t"
7541             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7542             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7543             "mov    $dst, $tmp\t# vector (1D)" %}
7544   ins_encode %{
7545     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7546     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
7547                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7548     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7549     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7550     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7551   %}
7552 
7553   ins_pipe(pipe_class_default);
7554 %}
7555 
7556 // Note: Long.bitCount(long) returns an int.
7557 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7558   predicate(UsePopCountInstruction);
7559   match(Set dst (PopCountL src));
7560   effect(TEMP tmp);
7561   ins_cost(INSN_COST * 13);
7562 
7563   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
7564             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7565             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7566             "mov    $dst, $tmp\t# vector (1D)" %}
7567   ins_encode %{
7568     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
7569     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7570     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7571     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7572   %}
7573 
7574   ins_pipe(pipe_class_default);
7575 %}
7576 
7577 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
7578   predicate(UsePopCountInstruction);
7579   match(Set dst (PopCountL (LoadL mem)));
7580   effect(TEMP tmp);
7581   ins_cost(INSN_COST * 13);
7582 
7583   format %{ "ldrd   $tmp, $mem\n\t"
7584             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7585             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7586             "mov    $dst, $tmp\t# vector (1D)" %}
7587   ins_encode %{
7588     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7589     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
7590                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7591     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7592     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7593     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7594   %}
7595 
7596   ins_pipe(pipe_class_default);
7597 %}
7598 
7599 // ============================================================================
7600 // MemBar Instruction
7601 
7602 instruct load_fence() %{
7603   match(LoadFence);
7604   ins_cost(VOLATILE_REF_COST);
7605 
7606   format %{ "load_fence" %}
7607 
7608   ins_encode %{
7609     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7610   %}
7611   ins_pipe(pipe_serial);
7612 %}
7613 
7614 instruct unnecessary_membar_acquire() %{
7615   predicate(unnecessary_acquire(n));
7616   match(MemBarAcquire);
7617   ins_cost(0);
7618 
7619   format %{ "membar_acquire (elided)" %}
7620 
7621   ins_encode %{
7622     __ block_comment("membar_acquire (elided)");
7623   %}
7624 
7625   ins_pipe(pipe_class_empty);
7626 %}
7627 
7628 instruct membar_acquire() %{
7629   match(MemBarAcquire);
7630   ins_cost(VOLATILE_REF_COST);
7631 
7632   format %{ "membar_acquire\n\t"
7633             "dmb ish" %}
7634 
7635   ins_encode %{
7636     __ block_comment("membar_acquire");
7637     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7638   %}
7639 
7640   ins_pipe(pipe_serial);
7641 %}
7642 
7643 
7644 instruct membar_acquire_lock() %{
7645   match(MemBarAcquireLock);
7646   ins_cost(VOLATILE_REF_COST);
7647 
7648   format %{ "membar_acquire_lock (elided)" %}
7649 
7650   ins_encode %{
7651     __ block_comment("membar_acquire_lock (elided)");
7652   %}
7653 
7654   ins_pipe(pipe_serial);
7655 %}
7656 
7657 instruct store_fence() %{
7658   match(StoreFence);
7659   ins_cost(VOLATILE_REF_COST);
7660 
7661   format %{ "store_fence" %}
7662 
7663   ins_encode %{
7664     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7665   %}
7666   ins_pipe(pipe_serial);
7667 %}
7668 
7669 instruct unnecessary_membar_release() %{
7670   predicate(unnecessary_release(n));
7671   match(MemBarRelease);
7672   ins_cost(0);
7673 
7674   format %{ "membar_release (elided)" %}
7675 
7676   ins_encode %{
7677     __ block_comment("membar_release (elided)");
7678   %}
7679   ins_pipe(pipe_serial);
7680 %}
7681 
7682 instruct membar_release() %{
7683   match(MemBarRelease);
7684   ins_cost(VOLATILE_REF_COST);
7685 
7686   format %{ "membar_release\n\t"
7687             "dmb ish" %}
7688 
7689   ins_encode %{
7690     __ block_comment("membar_release");
7691     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7692   %}
7693   ins_pipe(pipe_serial);
7694 %}
7695 
7696 instruct membar_storestore() %{
7697   match(MemBarStoreStore);
7698   ins_cost(VOLATILE_REF_COST);
7699 
7700   format %{ "MEMBAR-store-store" %}
7701 
7702   ins_encode %{
7703     __ membar(Assembler::StoreStore);
7704   %}
7705   ins_pipe(pipe_serial);
7706 %}
7707 
7708 instruct membar_release_lock() %{
7709   match(MemBarReleaseLock);
7710   ins_cost(VOLATILE_REF_COST);
7711 
7712   format %{ "membar_release_lock (elided)" %}
7713 
7714   ins_encode %{
7715     __ block_comment("membar_release_lock (elided)");
7716   %}
7717 
7718   ins_pipe(pipe_serial);
7719 %}
7720 
7721 instruct unnecessary_membar_volatile() %{
7722   predicate(unnecessary_volatile(n));
7723   match(MemBarVolatile);
7724   ins_cost(0);
7725 
7726   format %{ "membar_volatile (elided)" %}
7727 
7728   ins_encode %{
7729     __ block_comment("membar_volatile (elided)");
7730   %}
7731 
7732   ins_pipe(pipe_serial);
7733 %}
7734 
7735 instruct membar_volatile() %{
7736   match(MemBarVolatile);
7737   ins_cost(VOLATILE_REF_COST*100);
7738 
7739   format %{ "membar_volatile\n\t"
7740              "dmb ish"%}
7741 
7742   ins_encode %{
7743     __ block_comment("membar_volatile");
7744     __ membar(Assembler::StoreLoad);
7745   %}
7746 
7747   ins_pipe(pipe_serial);
7748 %}
7749 
7750 // ============================================================================
7751 // Cast/Convert Instructions
7752 
7753 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7754   match(Set dst (CastX2P src));
7755 
7756   ins_cost(INSN_COST);
7757   format %{ "mov $dst, $src\t# long -> ptr" %}
7758 
7759   ins_encode %{
7760     if ($dst$$reg != $src$$reg) {
7761       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7762     }
7763   %}
7764 
7765   ins_pipe(ialu_reg);
7766 %}
7767 
7768 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7769   match(Set dst (CastP2X src));
7770 
7771   ins_cost(INSN_COST);
7772   format %{ "mov $dst, $src\t# ptr -> long" %}
7773 
7774   ins_encode %{
7775     if ($dst$$reg != $src$$reg) {
7776       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7777     }
7778   %}
7779 
7780   ins_pipe(ialu_reg);
7781 %}
7782 
7783 // Convert oop into int for vectors alignment masking
7784 instruct convP2I(iRegINoSp dst, iRegP src) %{
7785   match(Set dst (ConvL2I (CastP2X src)));
7786 
7787   ins_cost(INSN_COST);
7788   format %{ "movw $dst, $src\t# ptr -> int" %}
7789   ins_encode %{
7790     __ movw($dst$$Register, $src$$Register);
7791   %}
7792 
7793   ins_pipe(ialu_reg);
7794 %}
7795 
7796 // Convert compressed oop into int for vectors alignment masking
7797 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // With a zero narrow-oop shift, the compressed oop bits are the low 32
  // address bits, so a 32-bit register move suffices.  Format corrected to
  // "movw $dst" -- it previously read "mov dst" (missing sigil, wrong
  // mnemonic for the emitted instruction).
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7811 
7812 
7813 // Convert oop pointer into compressed form
7814 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7815   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
7816   match(Set dst (EncodeP src));
7817   effect(KILL cr);
7818   ins_cost(INSN_COST * 3);
7819   format %{ "encode_heap_oop $dst, $src" %}
7820   ins_encode %{
7821     Register s = $src$$Register;
7822     Register d = $dst$$Register;
7823     __ encode_heap_oop(d, s);
7824   %}
7825   ins_pipe(ialu_reg);
7826 %}
7827 
7828 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7829   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
7830   match(Set dst (EncodeP src));
7831   ins_cost(INSN_COST * 3);
7832   format %{ "encode_heap_oop_not_null $dst, $src" %}
7833   ins_encode %{
7834     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
7835   %}
7836   ins_pipe(ialu_reg);
7837 %}
7838 
7839 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7840   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
7841             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
7842   match(Set dst (DecodeN src));
7843   ins_cost(INSN_COST * 3);
7844   format %{ "decode_heap_oop $dst, $src" %}
7845   ins_encode %{
7846     Register s = $src$$Register;
7847     Register d = $dst$$Register;
7848     __ decode_heap_oop(d, s);
7849   %}
7850   ins_pipe(ialu_reg);
7851 %}
7852 
7853 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7854   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
7855             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
7856   match(Set dst (DecodeN src));
7857   ins_cost(INSN_COST * 3);
7858   format %{ "decode_heap_oop_not_null $dst, $src" %}
7859   ins_encode %{
7860     Register s = $src$$Register;
7861     Register d = $dst$$Register;
7862     __ decode_heap_oop_not_null(d, s);
7863   %}
7864   ins_pipe(ialu_reg);
7865 %}
7866 
7867 // n.b. AArch64 implementations of encode_klass_not_null and
7868 // decode_klass_not_null do not modify the flags register so, unlike
7869 // Intel, we don't kill CR as a side effect here
7870 
7871 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
7872   match(Set dst (EncodePKlass src));
7873 
7874   ins_cost(INSN_COST * 3);
7875   format %{ "encode_klass_not_null $dst,$src" %}
7876 
7877   ins_encode %{
7878     Register src_reg = as_Register($src$$reg);
7879     Register dst_reg = as_Register($dst$$reg);
7880     __ encode_klass_not_null(dst_reg, src_reg);
7881   %}
7882 
7883    ins_pipe(ialu_reg);
7884 %}
7885 
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // In-place decode: the single-register form of the macro handles
      // dst == src, which the two-register form does not accept.
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7904 
7905 instruct checkCastPP(iRegPNoSp dst)
7906 %{
7907   match(Set dst (CheckCastPP dst));
7908 
7909   size(0);
7910   format %{ "# checkcastPP of $dst" %}
7911   ins_encode(/* empty encoding */);
7912   ins_pipe(pipe_class_empty);
7913 %}
7914 
7915 instruct castPP(iRegPNoSp dst)
7916 %{
7917   match(Set dst (CastPP dst));
7918 
7919   size(0);
7920   format %{ "# castPP of $dst" %}
7921   ins_encode(/* empty encoding */);
7922   ins_pipe(pipe_class_empty);
7923 %}
7924 
7925 instruct castII(iRegI dst)
7926 %{
7927   match(Set dst (CastII dst));
7928 
7929   size(0);
7930   format %{ "# castII of $dst" %}
7931   ins_encode(/* empty encoding */);
7932   ins_cost(0);
7933   ins_pipe(pipe_class_empty);
7934 %}
7935 
7936 // ============================================================================
7937 // Atomic operation instructions
7938 //
7939 // Intel and SPARC both implement Ideal Node LoadPLocked and
7940 // Store{PIL}Conditional instructions using a normal load for the
7941 // LoadPLocked and a CAS for the Store{PIL}Conditional.
7942 //
7943 // The ideal code appears only to use LoadPLocked/StorePLocked as a
7944 // pair to lock object allocations from Eden space when not using
7945 // TLABs.
7946 //
7947 // There does not appear to be a Load{IL}Locked Ideal Node and the
7948 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
7949 // and to use StoreIConditional only for 32-bit and StoreLConditional
7950 // only for 64-bit.
7951 //
7952 // We implement LoadPLocked and StorePLocked instructions using,
7953 // respectively the AArch64 hw load-exclusive and store-conditional
7954 // instructions. Whereas we must implement each of
7955 // Store{IL}Conditional using a CAS which employs a pair of
7956 // instructions comprising a load-exclusive followed by a
7957 // store-conditional.
7958 
7959 
7960 // Locked-load (linked load) of the current heap-top
7961 // used when updating the eden heap top
7962 // implemented using ldaxr on AArch64
7963 
7964 instruct loadPLocked(iRegPNoSp dst, indirect mem)
7965 %{
7966   match(Set dst (LoadPLocked mem));
7967 
7968   ins_cost(VOLATILE_REF_COST);
7969 
7970   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
7971 
7972   ins_encode(aarch64_enc_ldaxr(dst, mem));
7973 
7974   ins_pipe(pipe_serial);
7975 %}
7976 
7977 // Conditional-store of the updated heap-top.
7978 // Used during allocation of the shared heap.
7979 // Sets flag (EQ) on success.
7980 // implemented using stlxr on AArch64.
7981 
// Store-conditional (release form) of the updated heap top.  Note that
// $oldval is not compared by the encoding: success depends solely on
// the exclusive monitor established by the preceding loadPLocked still
// being held.  Per the format, the encoding leaves EQ set on success
// (rscratch1 receives the stlxr status) -- NOTE(review): confirm the
// cmpw is emitted inside aarch64_enc_stlxr, which is defined elsewhere.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8001 
8002 
8003 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8004 // when attempting to rebias a lock towards the current thread.  We
8005 // must use the acquire form of cmpxchg in order to guarantee acquire
8006 // semantics in this case.
// 64-bit conditional store, implemented as an acquiring CAS (see the
// comment above: acquire semantics are required because this is used
// when rebiasing a lock).  Only the flags result is produced; EQ is
// set on a successful write.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8022 
8023 // storeIConditional also has acquire semantics, for no better reason
8024 // than matching storeLConditional.  At the time of writing this
8025 // comment storeIConditional was not used anywhere by AArch64.
// 32-bit conditional store, acquiring CAS form.  As the comment above
// notes, the acquire form is used only for symmetry with
// storeLConditional; EQ is set on a successful write.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8041 
8042 // standard CompareAndSwapX when we are using barriers
8043 // these have higher priority than the rules selected by a predicate
8044 
8045 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8046 // can't match them
8047 
// Each CompareAndSwapX rule below implements a strong CAS: compare the
// value at $mem with $oldval and, if equal, store $newval.  $res
// receives the success flag (1 on success, 0 on failure) via cset, and
// the condition flags are clobbered (KILL cr).

// Byte-wide CAS.  NOTE(review): the "# (int)" annotation in the format
// looks copied from the int rule; the operation itself is byte-wide
// (aarch64_enc_cmpxchgb).
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Halfword (short) CAS.  NOTE(review): "# (int)" in the format is
// likewise inherited from the int rule.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Word (int) CAS.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Doubleword (long) CAS.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS (doubleword encoding).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS (word encoding).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8155 
8156 // alternative CompareAndSwapX when we are eliding barriers
8157 
// Acquire variants of the CompareAndSwapX rules.  Selected when
// needs_acquiring_load_exclusive(n) holds; they use the acquiring
// cmpxchg encodings and carry a lower cost (VOLATILE_REF_COST versus
// 2 * VOLATILE_REF_COST) than the plain rules above.

// Byte-wide acquiring CAS.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Halfword (short) acquiring CAS.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Word (int) acquiring CAS.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Doubleword (long) acquiring CAS.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer acquiring CAS.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop acquiring CAS.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8271 
8272 
8273 // ---------------------------------------------------------------------
8274 
8275 
8276 // BEGIN This section of the file is automatically generated. Do not edit --------------
8277 
8278 // Sundry CAS operations.  Note that release is always true,
8279 // regardless of the memory ordering of the CAS.  This is because we
8280 // need the volatile case to be sequentially consistent but there is
8281 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8282 // can't check the type of memory ordering here, so we always emit a
8283 // STLXR.
8284 
8285 // This section is generated from aarch64_ad_cas.m4
8286 
8287 
8288 
// Strong compare-and-exchange: $res receives the value found at $mem
// (not a success flag), so TEMP_DEF res keeps the result register
// disjoint from the inputs.  Every encoding here passes /*weak*/ false
// to MacroAssembler::cmpxchg -- these rules cannot fail spuriously; the
// weak forms are the WeakCompareAndSwap rules further below.  The
// format strings previously claimed "weak", contradicting the
// /*weak*/ false argument; that text is corrected here.
//
// NOTE(review): this section is generated from aarch64_ad_cas.m4; the
// same format-string correction must be applied to the m4 source so it
// is not lost on regeneration.

instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // sign-extend the fetched byte into the int result
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // sign-extend the fetched halfword into the int result
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8380 
// Acquire variants of the strong compare-and-exchange rules above,
// selected by needs_acquiring_load_exclusive(n); /*acquire*/ is true
// but /*weak*/ remains false, so these also retry until success.  The
// format strings previously said "weak", contradicting the encoding;
// corrected here.
//
// NOTE(review): generated from aarch64_ad_cas.m4 -- apply the same
// format-string correction to the m4 source.

instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // sign-extend the fetched byte into the int result
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // sign-extend the fetched halfword into the int result
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8480 
// Weak CAS rules: /*weak*/ true is passed to cmpxchg, so a single
// attempt is made and the operation may fail spuriously.  $res receives
// the success flag (csetw on EQ), not the fetched value.

instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Note: $res is an int register even for the long form -- it holds the
// success flag, not the 64-bit value.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8582 
// Acquire variants of the weak CAS rules (selected by
// needs_acquiring_load_exclusive(n)); /*acquire*/ true, /*weak*/ true.

instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Note: $res is an int register holding the success flag, not the
// 64-bit value.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8690 
8691 // END This section of the file is automatically generated. Do not edit --------------
8692 // ---------------------------------------------------------------------
8693 
// Atomic exchange: store $newv at [$mem] and return the previous value
// in $prev.  Plain (non-acquiring) forms.
// NOTE(review): get_and_setN returns its previous value in an
// iRegINoSp rather than a narrow-oop register class -- confirm this is
// intentional (the word-wide xchg result is a raw 32-bit pattern).

instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8733 
// Acquiring atomic exchange forms (atomic_xchgal/atomic_xchgalw),
// selected by needs_acquiring_load_exclusive(n).

instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8777 
8778 
// GetAndAddL rules: register-increment and immediate-increment forms,
// each with a "_no_res" twin that matches when result_not_used() and
// passes noreg as the result register so no old value is produced.

// long get-and-add, register increment, result used
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long get-and-add, register increment, result discarded
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long get-and-add, immediate increment (add/sub-encodable), result used
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long get-and-add, immediate increment, result discarded
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8820 
// GetAndAddI rules: word-sized counterparts of the GetAndAddL family
// above, encoding via atomic_addw.

// int get-and-add, register increment, result used
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int get-and-add, register increment, result discarded
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int get-and-add, immediate increment (add/sub-encodable), result used
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int get-and-add, immediate increment, result discarded
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8862 
// Acquiring variants of the GetAndAddL rules. Selected when
// needs_acquiring_load_exclusive(n) holds (combined with
// result_not_used() for the _no_res forms); encode via atomic_addal.

// long get-and-add with acquire semantics, register increment
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// as above, result discarded
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long get-and-add with acquire semantics, immediate increment
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// as above, result discarded
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8906 
// Acquiring variants of the GetAndAddI rules; word-sized counterparts
// of the acquiring long rules above, encoding via atomic_addalw.

// int get-and-add with acquire semantics, register increment
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// as above, result discarded
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int get-and-add with acquire semantics, immediate increment
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// as above, result discarded
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8950 
8951 // Manifest a CmpL result in an integer register.
8952 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // cmp sets the flags; csetw writes 0 for EQ and 1 otherwise;
    // cnegw then negates the result when the compare was LT, so the
    // final value is -1 / 0 / +1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8973 
// As cmpL3_reg_reg but with an add/sub-encodable immediate for src2.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative immediate cannot be encoded in subs, so compare by
    // adding its negation instead; either way only the flags matter
    // (destination is zr).
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8998 
8999 // ============================================================================
9000 // Conditional Move Instructions
9001 
9002 // n.b. we have identical rules for both a signed compare op (cmpOp)
9003 // and an unsigned compare op (cmpOpU). it would be nice if we could
9004 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
9006 // opclass does not live up to the COND_INTER interface of its
9007 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9009 // which throws a ShouldNotHappen. So, we have to provide two flavours
9010 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9011 
// CMoveI with two register inputs. Note the operand order: cselw
// selects $src2 when the condition holds, $src1 otherwise.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above (see the note preceding
// this section on why cmpOp and cmpOpU need separate rules)
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9043 
9044 // special cases where one arg is zero
9045 
9046 // n.b. this is selected in preference to the rule above because it
9047 // avoids loading constant 0 into a source register
9048 
9049 // TODO
9050 // we ought only to be able to cull one of these variants as the ideal
9051 // transforms ought always to order the zero consistently (to left/right?)
9052 
// CMoveI where the zero operand is replaced by zr, saving a constant load.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// mirror-image rule: zero as the second CMove input
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9116 
9117 // special case for creating a boolean 0 or 1
9118 
9119 // n.b. this is selected in preference to the rule above because it
9120 // avoids loading constants 0 and 1 into a source register
9121 
// CMoveI selecting between constants 1 and 0: csincw zr, zr yields
// 0 when the condition holds and zr+1 == 1 otherwise, so neither
// constant needs to be materialized in a register.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9159 
// CMoveL with two register inputs; 64-bit csel, operands swapped as in
// the int rules ($src2 selected when the condition holds).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9191 
9192 // special cases where one arg is zero
9193 
// CMoveL special cases with a zero operand encoded as zr.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// mirror-image rule: zero as the first CMove input
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9257 
// CMoveP with two register inputs; pointers use the 64-bit csel.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9289 
9290 // special cases where one arg is zero
9291 
// CMoveP special cases with a null (zero) operand encoded as zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// mirror-image rule: zero as the first CMove input
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9355 
// CMoveN (compressed pointer) with two register inputs; narrow oops
// are 32 bits so the word-sized cselw is used.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9371 
// Unsigned-compare flavour of cmovN_reg_reg. (Format comment fixed:
// this rule takes cmpOpU/rFlagsRegU, so it is the unsigned variant.)
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9387 
9388 // special cases where one arg is zero
9389 
// CMoveN special cases with a zero operand encoded as zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// mirror-image rule: zero as the first CMove input
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9453 
// CMoveF via fcsels; as with the integer rules, $src2 is selected when
// the condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// unsigned-compare flavour of the rule above
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9489 
// CMoveD via fcseld. (Format comment fixed: this is the double rule
// — vRegD operands, fcseld — so it says "cmove double", consistent
// with the float rules above.)
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9507 
// Unsigned-compare flavour of cmovD_reg. (Format comment fixed:
// "double", not "float" — this rule uses vRegD/fcseld.)
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9525 
9526 // ============================================================================
9527 // Arithmetic Instructions
9528 //
9529 
9530 // Integer Addition
9531 
9532 // TODO
9533 // these currently employ operations which do not set CR and hence are
9534 // not flagged as killing CR but we would like to isolate the cases
9535 // where we want to set flags from those where we don't. need to work
9536 // out how to do that.
9537 
// int add, register + register
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int add, register + add/sub-encodable immediate; shares the
// aarch64_enc_addsubw_imm encoding with subI_reg_imm, distinguished
// by the opcode
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// as above but with the first input narrowed from a long (ConvL2I)
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9580 
9581 // Pointer Addition
// pointer add, register + long offset register
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// pointer add with an int offset, folding the ConvI2L into the add's
// sxtw operand extension
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// pointer add folding a left-shifted (scaled) long index into the
// address computation via lea
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// as above but the index is an int, so the scaled offset uses sxtw
// extension in the address
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9641 
// (ConvI2L src) << scale implemented as a single sbfiz (signed
// bit-field insert in zero): position scale&63, width capped at 32
// since only 32 source bits are significant.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9656 
9657 // Pointer Immediate Addition
9658 // n.b. this needs to be more expensive than using an indirect memory
9659 // operand
// pointer add, register + add/sub-encodable immediate
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9673 
9674 // Long Addition
// long add, register + register
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9690 
// Long Immediate Addition -- no constant pool entries required.
// long add, register + add/sub-encodable immediate; shares the
// aarch64_enc_addsub_imm encoding with subL_reg_imm, distinguished
// by the opcode
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9705 
9706 // Integer Subtraction
9707 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9708   match(Set dst (SubI src1 src2));
9709 
9710   ins_cost(INSN_COST);
9711   format %{ "subw  $dst, $src1, $src2" %}
9712 
9713   ins_encode %{
9714     __ subw(as_Register($dst$$reg),
9715             as_Register($src1$$reg),
9716             as_Register($src2$$reg));
9717   %}
9718 
9719   ins_pipe(ialu_reg_reg);
9720 %}
9721 
9722 // Immediate Subtraction
// 32-bit subtract of an add/sub-encodable immediate: dst = src1 - src2.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  // shared 32-bit add/sub-immediate encoder; the opcode above selects sub
  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9736 
9737 // Long Subtraction
// 64-bit register-register subtract: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9753 
// Long Immediate Subtraction.
// No constant pool entries required.
// 64-bit subtract of an add/sub-encodable immediate: dst = src1 - src2.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // NOTE(fix): was "sub$dst, ..." — missing separator after the mnemonic,
  // inconsistent with every sibling rule's format string.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  // shared 64-bit add/sub-immediate encoder; the opcode above selects sub
  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9768 
9769 // Integer Negation (special case for sub)
9770 
// 32-bit negate, matched from (0 - src): dst = -src (negw form).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9784 
9785 // Long Negation
9786 
// 64-bit negate, matched from (0 - src): dst = -src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9800 
9801 // Integer Multiply
9802 
// 32-bit multiply: dst = src1 * src2 (mulw form).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9817 
// Widening signed multiply: dst(64) = (long)src1 * (long)src2,
// matched from MulL of two sign-extended ints (smull form).
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9832 
9833 // Long Multiply
9834 
// 64-bit multiply: dst = src1 * src2.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9849 
// High 64 bits of a signed 64x64->128 multiply (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // NOTE(fix): removed the stray trailing comma before the tab in the
  // disassembly format string ("..., $src2, \t# mulhi").
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9865 
9866 // Combined Integer Multiply & Add/Sub
9867 
// 32-bit multiply-add: dst = src3 + src1 * src2 (maddw form).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // NOTE(fix): format said "madd" but the encoder emits the 32-bit maddw;
  // make the printed mnemonic match the generated instruction.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9883 
// 32-bit multiply-subtract: dst = src3 - src1 * src2 (msubw form).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // NOTE(fix): format said "msub" but the encoder emits the 32-bit msubw;
  // make the printed mnemonic match the generated instruction.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9899 
9900 // Combined Long Multiply & Add/Sub
9901 
// 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9917 
// 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9933 
9934 // Combine Integer Signed Multiply & Add/Sub/Neg Long
9935 
// Widening signed multiply-add: dst(64) = src3 + (long)src1 * (long)src2
// (smaddl form), matched from AddL of a widened int multiply.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9951 
// Widening signed multiply-subtract: dst(64) = src3 - (long)src1 * (long)src2
// (smsubl form).
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9967 
// Widening signed multiply-negate: dst(64) = -((long)src1 * (long)src2)
// (smnegl form). Two match rules cover the negation on either operand.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9983 
9984 // Integer Divide
9985 
// 32-bit signed divide: dst = src1 / src2 (sdivw via shared encoder).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9995 
9996 // Long Divide
9997 
// 64-bit signed divide: dst = src1 / src2 (sdiv via shared encoder).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10007 
10008 // Integer Remainder
10009 
// 32-bit signed remainder: dst = src1 % src2, computed as a
// divide (into rscratch1) followed by multiply-subtract.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // NOTE(fix): second line read "msubw($dst, ..." with a spurious,
  // unbalanced parenthesis; corrected to normal assembly syntax.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10020 
10021 // Long Remainder
10022 
// 64-bit signed remainder: dst = src1 % src2, computed as a
// divide (into rscratch1) followed by multiply-subtract.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // NOTE(fix): second line read "msub($dst, ..." with a spurious,
  // unbalanced parenthesis; also add the missing \t after \n so the
  // continuation indents like modI's format string.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10033 
10034 // Integer Shifts
10035 
10036 // Shift Left Register
// 32-bit shift left by register: dst = src1 << (src2 & 0x1f) (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10051 
10052 // Shift Left Immediate
// 32-bit shift left by constant; shift count masked to 5 bits (Java semantics).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10067 
10068 // Shift Right Logical Register
// 32-bit logical (unsigned) shift right by register (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10083 
10084 // Shift Right Logical Immediate
// 32-bit logical shift right by constant; count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10099 
10100 // Shift Right Arithmetic Register
// 32-bit arithmetic (sign-propagating) shift right by register (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10115 
10116 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by constant; count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10131 
10132 // Combined Int Mask and Right Shift (using UBFM)
10133 // TODO
10134 
10135 // Long Shifts
10136 
10137 // Shift Left Register
// 64-bit shift left by register (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10152 
10153 // Shift Left Immediate
// 64-bit shift left by constant; count masked to 6 bits (Java semantics).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10168 
10169 // Shift Right Logical Register
// 64-bit logical (unsigned) shift right by register (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10184 
10185 // Shift Right Logical Immediate
// 64-bit logical shift right by constant; count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10200 
10201 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as a long (CastP2X);
// special-cased so card table address computations stay a single lsr.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10216 
10217 // Shift Right Arithmetic Register
// 64-bit arithmetic (sign-propagating) shift right by register (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10232 
10233 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by constant; count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10248 
10249 // BEGIN This section of the file is automatically generated. Do not edit --------------
10250 
10251 instruct regL_not_reg(iRegLNoSp dst,
10252                          iRegL src1, immL_M1 m1,
10253                          rFlagsReg cr) %{
10254   match(Set dst (XorL src1 m1));
10255   ins_cost(INSN_COST);
10256   format %{ "eon  $dst, $src1, zr" %}
10257 
10258   ins_encode %{
10259     __ eon(as_Register($dst$$reg),
10260               as_Register($src1$$reg),
10261               zr,
10262               Assembler::LSL, 0);
10263   %}
10264 
10265   ins_pipe(ialu_reg);
10266 %}
10267 instruct regI_not_reg(iRegINoSp dst,
10268                          iRegIorL2I src1, immI_M1 m1,
10269                          rFlagsReg cr) %{
10270   match(Set dst (XorI src1 m1));
10271   ins_cost(INSN_COST);
10272   format %{ "eonw  $dst, $src1, zr" %}
10273 
10274   ins_encode %{
10275     __ eonw(as_Register($dst$$reg),
10276               as_Register($src1$$reg),
10277               zr,
10278               Assembler::LSL, 0);
10279   %}
10280 
10281   ins_pipe(ialu_reg);
10282 %}
10283 
10284 instruct AndI_reg_not_reg(iRegINoSp dst,
10285                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10286                          rFlagsReg cr) %{
10287   match(Set dst (AndI src1 (XorI src2 m1)));
10288   ins_cost(INSN_COST);
10289   format %{ "bicw  $dst, $src1, $src2" %}
10290 
10291   ins_encode %{
10292     __ bicw(as_Register($dst$$reg),
10293               as_Register($src1$$reg),
10294               as_Register($src2$$reg),
10295               Assembler::LSL, 0);
10296   %}
10297 
10298   ins_pipe(ialu_reg_reg);
10299 %}
10300 
10301 instruct AndL_reg_not_reg(iRegLNoSp dst,
10302                          iRegL src1, iRegL src2, immL_M1 m1,
10303                          rFlagsReg cr) %{
10304   match(Set dst (AndL src1 (XorL src2 m1)));
10305   ins_cost(INSN_COST);
10306   format %{ "bic  $dst, $src1, $src2" %}
10307 
10308   ins_encode %{
10309     __ bic(as_Register($dst$$reg),
10310               as_Register($src1$$reg),
10311               as_Register($src2$$reg),
10312               Assembler::LSL, 0);
10313   %}
10314 
10315   ins_pipe(ialu_reg_reg);
10316 %}
10317 
10318 instruct OrI_reg_not_reg(iRegINoSp dst,
10319                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10320                          rFlagsReg cr) %{
10321   match(Set dst (OrI src1 (XorI src2 m1)));
10322   ins_cost(INSN_COST);
10323   format %{ "ornw  $dst, $src1, $src2" %}
10324 
10325   ins_encode %{
10326     __ ornw(as_Register($dst$$reg),
10327               as_Register($src1$$reg),
10328               as_Register($src2$$reg),
10329               Assembler::LSL, 0);
10330   %}
10331 
10332   ins_pipe(ialu_reg_reg);
10333 %}
10334 
10335 instruct OrL_reg_not_reg(iRegLNoSp dst,
10336                          iRegL src1, iRegL src2, immL_M1 m1,
10337                          rFlagsReg cr) %{
10338   match(Set dst (OrL src1 (XorL src2 m1)));
10339   ins_cost(INSN_COST);
10340   format %{ "orn  $dst, $src1, $src2" %}
10341 
10342   ins_encode %{
10343     __ orn(as_Register($dst$$reg),
10344               as_Register($src1$$reg),
10345               as_Register($src2$$reg),
10346               Assembler::LSL, 0);
10347   %}
10348 
10349   ins_pipe(ialu_reg_reg);
10350 %}
10351 
10352 instruct XorI_reg_not_reg(iRegINoSp dst,
10353                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10354                          rFlagsReg cr) %{
10355   match(Set dst (XorI m1 (XorI src2 src1)));
10356   ins_cost(INSN_COST);
10357   format %{ "eonw  $dst, $src1, $src2" %}
10358 
10359   ins_encode %{
10360     __ eonw(as_Register($dst$$reg),
10361               as_Register($src1$$reg),
10362               as_Register($src2$$reg),
10363               Assembler::LSL, 0);
10364   %}
10365 
10366   ins_pipe(ialu_reg_reg);
10367 %}
10368 
10369 instruct XorL_reg_not_reg(iRegLNoSp dst,
10370                          iRegL src1, iRegL src2, immL_M1 m1,
10371                          rFlagsReg cr) %{
10372   match(Set dst (XorL m1 (XorL src2 src1)));
10373   ins_cost(INSN_COST);
10374   format %{ "eon  $dst, $src1, $src2" %}
10375 
10376   ins_encode %{
10377     __ eon(as_Register($dst$$reg),
10378               as_Register($src1$$reg),
10379               as_Register($src2$$reg),
10380               Assembler::LSL, 0);
10381   %}
10382 
10383   ins_pipe(ialu_reg_reg);
10384 %}
10385 
10386 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10387                          iRegIorL2I src1, iRegIorL2I src2,
10388                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10389   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10390   ins_cost(1.9 * INSN_COST);
10391   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10392 
10393   ins_encode %{
10394     __ bicw(as_Register($dst$$reg),
10395               as_Register($src1$$reg),
10396               as_Register($src2$$reg),
10397               Assembler::LSR,
10398               $src3$$constant & 0x1f);
10399   %}
10400 
10401   ins_pipe(ialu_reg_reg_shift);
10402 %}
10403 
10404 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10405                          iRegL src1, iRegL src2,
10406                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10407   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10408   ins_cost(1.9 * INSN_COST);
10409   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10410 
10411   ins_encode %{
10412     __ bic(as_Register($dst$$reg),
10413               as_Register($src1$$reg),
10414               as_Register($src2$$reg),
10415               Assembler::LSR,
10416               $src3$$constant & 0x3f);
10417   %}
10418 
10419   ins_pipe(ialu_reg_reg_shift);
10420 %}
10421 
10422 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10423                          iRegIorL2I src1, iRegIorL2I src2,
10424                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10425   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10426   ins_cost(1.9 * INSN_COST);
10427   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10428 
10429   ins_encode %{
10430     __ bicw(as_Register($dst$$reg),
10431               as_Register($src1$$reg),
10432               as_Register($src2$$reg),
10433               Assembler::ASR,
10434               $src3$$constant & 0x1f);
10435   %}
10436 
10437   ins_pipe(ialu_reg_reg_shift);
10438 %}
10439 
10440 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10441                          iRegL src1, iRegL src2,
10442                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10443   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10444   ins_cost(1.9 * INSN_COST);
10445   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10446 
10447   ins_encode %{
10448     __ bic(as_Register($dst$$reg),
10449               as_Register($src1$$reg),
10450               as_Register($src2$$reg),
10451               Assembler::ASR,
10452               $src3$$constant & 0x3f);
10453   %}
10454 
10455   ins_pipe(ialu_reg_reg_shift);
10456 %}
10457 
10458 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10459                          iRegIorL2I src1, iRegIorL2I src2,
10460                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10461   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10462   ins_cost(1.9 * INSN_COST);
10463   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10464 
10465   ins_encode %{
10466     __ bicw(as_Register($dst$$reg),
10467               as_Register($src1$$reg),
10468               as_Register($src2$$reg),
10469               Assembler::LSL,
10470               $src3$$constant & 0x1f);
10471   %}
10472 
10473   ins_pipe(ialu_reg_reg_shift);
10474 %}
10475 
10476 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10477                          iRegL src1, iRegL src2,
10478                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10479   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10480   ins_cost(1.9 * INSN_COST);
10481   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10482 
10483   ins_encode %{
10484     __ bic(as_Register($dst$$reg),
10485               as_Register($src1$$reg),
10486               as_Register($src2$$reg),
10487               Assembler::LSL,
10488               $src3$$constant & 0x3f);
10489   %}
10490 
10491   ins_pipe(ialu_reg_reg_shift);
10492 %}
10493 
10494 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10495                          iRegIorL2I src1, iRegIorL2I src2,
10496                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10497   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10498   ins_cost(1.9 * INSN_COST);
10499   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10500 
10501   ins_encode %{
10502     __ eonw(as_Register($dst$$reg),
10503               as_Register($src1$$reg),
10504               as_Register($src2$$reg),
10505               Assembler::LSR,
10506               $src3$$constant & 0x1f);
10507   %}
10508 
10509   ins_pipe(ialu_reg_reg_shift);
10510 %}
10511 
10512 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10513                          iRegL src1, iRegL src2,
10514                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10515   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10516   ins_cost(1.9 * INSN_COST);
10517   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10518 
10519   ins_encode %{
10520     __ eon(as_Register($dst$$reg),
10521               as_Register($src1$$reg),
10522               as_Register($src2$$reg),
10523               Assembler::LSR,
10524               $src3$$constant & 0x3f);
10525   %}
10526 
10527   ins_pipe(ialu_reg_reg_shift);
10528 %}
10529 
10530 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
10531                          iRegIorL2I src1, iRegIorL2I src2,
10532                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10533   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
10534   ins_cost(1.9 * INSN_COST);
10535   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
10536 
10537   ins_encode %{
10538     __ eonw(as_Register($dst$$reg),
10539               as_Register($src1$$reg),
10540               as_Register($src2$$reg),
10541               Assembler::ASR,
10542               $src3$$constant & 0x1f);
10543   %}
10544 
10545   ins_pipe(ialu_reg_reg_shift);
10546 %}
10547 
10548 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
10549                          iRegL src1, iRegL src2,
10550                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10551   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
10552   ins_cost(1.9 * INSN_COST);
10553   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
10554 
10555   ins_encode %{
10556     __ eon(as_Register($dst$$reg),
10557               as_Register($src1$$reg),
10558               as_Register($src2$$reg),
10559               Assembler::ASR,
10560               $src3$$constant & 0x3f);
10561   %}
10562 
10563   ins_pipe(ialu_reg_reg_shift);
10564 %}
10565 
10566 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
10567                          iRegIorL2I src1, iRegIorL2I src2,
10568                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10569   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
10570   ins_cost(1.9 * INSN_COST);
10571   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
10572 
10573   ins_encode %{
10574     __ eonw(as_Register($dst$$reg),
10575               as_Register($src1$$reg),
10576               as_Register($src2$$reg),
10577               Assembler::LSL,
10578               $src3$$constant & 0x1f);
10579   %}
10580 
10581   ins_pipe(ialu_reg_reg_shift);
10582 %}
10583 
10584 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
10585                          iRegL src1, iRegL src2,
10586                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10587   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
10588   ins_cost(1.9 * INSN_COST);
10589   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
10590 
10591   ins_encode %{
10592     __ eon(as_Register($dst$$reg),
10593               as_Register($src1$$reg),
10594               as_Register($src2$$reg),
10595               Assembler::LSL,
10596               $src3$$constant & 0x3f);
10597   %}
10598 
10599   ins_pipe(ialu_reg_reg_shift);
10600 %}
10601 
10602 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
10603                          iRegIorL2I src1, iRegIorL2I src2,
10604                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10605   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
10606   ins_cost(1.9 * INSN_COST);
10607   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
10608 
10609   ins_encode %{
10610     __ ornw(as_Register($dst$$reg),
10611               as_Register($src1$$reg),
10612               as_Register($src2$$reg),
10613               Assembler::LSR,
10614               $src3$$constant & 0x1f);
10615   %}
10616 
10617   ins_pipe(ialu_reg_reg_shift);
10618 %}
10619 
10620 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
10621                          iRegL src1, iRegL src2,
10622                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10623   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
10624   ins_cost(1.9 * INSN_COST);
10625   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
10626 
10627   ins_encode %{
10628     __ orn(as_Register($dst$$reg),
10629               as_Register($src1$$reg),
10630               as_Register($src2$$reg),
10631               Assembler::LSR,
10632               $src3$$constant & 0x3f);
10633   %}
10634 
10635   ins_pipe(ialu_reg_reg_shift);
10636 %}
10637 
10638 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10639                          iRegIorL2I src1, iRegIorL2I src2,
10640                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10641   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10642   ins_cost(1.9 * INSN_COST);
10643   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10644 
10645   ins_encode %{
10646     __ ornw(as_Register($dst$$reg),
10647               as_Register($src1$$reg),
10648               as_Register($src2$$reg),
10649               Assembler::ASR,
10650               $src3$$constant & 0x1f);
10651   %}
10652 
10653   ins_pipe(ialu_reg_reg_shift);
10654 %}
10655 
10656 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10657                          iRegL src1, iRegL src2,
10658                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10659   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10660   ins_cost(1.9 * INSN_COST);
10661   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10662 
10663   ins_encode %{
10664     __ orn(as_Register($dst$$reg),
10665               as_Register($src1$$reg),
10666               as_Register($src2$$reg),
10667               Assembler::ASR,
10668               $src3$$constant & 0x3f);
10669   %}
10670 
10671   ins_pipe(ialu_reg_reg_shift);
10672 %}
10673 
10674 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10675                          iRegIorL2I src1, iRegIorL2I src2,
10676                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10677   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10678   ins_cost(1.9 * INSN_COST);
10679   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10680 
10681   ins_encode %{
10682     __ ornw(as_Register($dst$$reg),
10683               as_Register($src1$$reg),
10684               as_Register($src2$$reg),
10685               Assembler::LSL,
10686               $src3$$constant & 0x1f);
10687   %}
10688 
10689   ins_pipe(ialu_reg_reg_shift);
10690 %}
10691 
10692 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10693                          iRegL src1, iRegL src2,
10694                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10695   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10696   ins_cost(1.9 * INSN_COST);
10697   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10698 
10699   ins_encode %{
10700     __ orn(as_Register($dst$$reg),
10701               as_Register($src1$$reg),
10702               as_Register($src2$$reg),
10703               Assembler::LSL,
10704               $src3$$constant & 0x3f);
10705   %}
10706 
10707   ins_pipe(ialu_reg_reg_shift);
10708 %}
10709 
// dst = src1 & (src2 >>> src3) (32-bit): the unsigned right shift is folded
// into andw's LSR shifted-register operand, saving a separate shift insn.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10728 
// dst = src1 & (src2 >>> src3) (64-bit): shift folded into andr's LSR operand.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10747 
// dst = src1 & (src2 >> src3) (32-bit): arithmetic shift folded into andw's
// ASR shifted-register operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10766 
// dst = src1 & (src2 >> src3) (64-bit): shift folded into andr's ASR operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10785 
// dst = src1 & (src2 << src3) (32-bit): shift folded into andw's LSL operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10804 
// dst = src1 & (src2 << src3) (64-bit): shift folded into andr's LSL operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10823 
// dst = src1 ^ (src2 >>> src3) (32-bit): shift folded into eorw's LSR operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10842 
// dst = src1 ^ (src2 >>> src3) (64-bit): shift folded into eor's LSR operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10861 
// dst = src1 ^ (src2 >> src3) (32-bit): shift folded into eorw's ASR operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10880 
// dst = src1 ^ (src2 >> src3) (64-bit): shift folded into eor's ASR operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10899 
// dst = src1 ^ (src2 << src3) (32-bit): shift folded into eorw's LSL operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10918 
// dst = src1 ^ (src2 << src3) (64-bit): shift folded into eor's LSL operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10937 
// dst = src1 | (src2 >>> src3) (32-bit): shift folded into orrw's LSR operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10956 
// dst = src1 | (src2 >>> src3) (64-bit): shift folded into orr's LSR operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10975 
// dst = src1 | (src2 >> src3) (32-bit): shift folded into orrw's ASR operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10994 
// dst = src1 | (src2 >> src3) (64-bit): shift folded into orr's ASR operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11013 
// dst = src1 | (src2 << src3) (32-bit): shift folded into orrw's LSL operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11032 
// dst = src1 | (src2 << src3) (64-bit): shift folded into orr's LSL operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11051 
// dst = src1 + (src2 >>> src3) (32-bit): shift folded into addw's LSR operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11070 
// dst = src1 + (src2 >>> src3) (64-bit): shift folded into add's LSR operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11089 
// dst = src1 + (src2 >> src3) (32-bit): shift folded into addw's ASR operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11108 
// dst = src1 + (src2 >> src3) (64-bit): shift folded into add's ASR operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11127 
// dst = src1 + (src2 << src3) (32-bit): shift folded into addw's LSL operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11146 
// dst = src1 + (src2 << src3) (64-bit): shift folded into add's LSL operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11165 
// dst = src1 - (src2 >>> src3) (32-bit): shift folded into subw's LSR operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11184 
// dst = src1 - (src2 >>> src3) (64-bit): shift folded into sub's LSR operand.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11203 
// dst = src1 - (src2 >> src3) (32-bit): shift folded into subw's ASR operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11222 
// dst = src1 - (src2 >> src3) (64-bit): shift folded into sub's ASR operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11241 
// dst = src1 - (src2 << src3) (32-bit): shift folded into subw's LSL operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11260 
// dst = src1 - (src2 << src3) (64-bit): shift folded into sub's LSL operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11279 
11280 
11281 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic, 64-bit) collapses to one SBFM.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts behave mod 64 for 64-bit shifts.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    // SBFM immr/imms equivalent of the double shift: imms (s) marks the
    // highest source bit kept, immr (r) rotates it to the final position.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11301 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic, 32-bit) collapses to one SBFMW.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts behave mod 32 for 32-bit shifts.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    // SBFMW immr/imms equivalent of the double shift.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11321 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical, 64-bit) collapses to one UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts behave mod 64 for 64-bit shifts.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    // UBFM immr/imms equivalent of the double shift (zero-extending form).
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11341 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical, 32-bit) collapses to one UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts behave mod 32 for 32-bit shifts.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    // UBFMW immr/imms equivalent of the double shift (zero-extending form).
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask+1 is a power of two, is a UBFXW
// extracting log2(mask+1) bits starting at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // Field width in bits; mask is (1 << width) - 1 by immI_bitmask.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: (src >>> rshift) & mask -> UBFX of
// log2(mask+1) bits starting at bit rshift.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    // Field width in bits; mask is (1 << width) - 1 by immL_bitmask.
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11397 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// Matches ConvI2L on top of the 32-bit extract: ubfx zero-extends, so the
// int-to-long conversion comes for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11417 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift (32-bit) -> UBFIZW inserting log2(mask+1) low bits
// at bit position lshift.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// (src & mask) << lshift (64-bit) -> UBFIZ inserting log2(mask+1) low bits
// at bit position lshift.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11454 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (ubfiz zero-extends the inserted field, so the ConvI2L is absorbed).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11472 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64 is an
// EXTR: extract a 64-bit window from the src1:src2 register pair.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must sum to the register width (mod 64).
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11489 
// 32-bit variant: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 becomes an EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must sum to the register width (mod 32).
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11504 
// Same as extrOrL but the combining node is AddL: when the shifted pieces
// occupy disjoint bit ranges (counts sum to 64), add equals or, so EXTR applies.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11519 
// 32-bit variant of extrAddL: disjoint shifted pieces combined with AddI
// (counts sum to 32) map onto EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11534 
11535 
// rol expander

// Rotate-left by register (64-bit), implemented as rotate-right by the
// negated count: rol(x, s) == ror(x, -s) since shift counts wrap mod 64.
// Clobbers rscratch1 to hold the negated count.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11551 
// rol expander

// Rotate-left by register (32-bit): rorvw with the negated count.
// Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11567 
// Matches the rotate-left idiom (x << s) | (x >>> (64 - s)) and expands
// to the rolL_rReg expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11576 
// Same rotate-left idiom with constant 0: (0 - s) is congruent to (64 - s)
// under the mod-64 shift-count masking, so it expands the same way.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11585 
// 32-bit rotate-left idiom (x << s) | (x >>> (32 - s)) -> rolI_rReg.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11594 
// 32-bit rotate-left idiom with constant 0: (0 - s) ≡ (32 - s) mod 32.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11603 
// ror expander

// Rotate-right by register (64-bit): maps directly onto rorv.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11618 
// ror expander

// Rotate-right by register (32-bit): maps directly onto rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11633 
// Rotate-right (variable shift) pattern matchers.
//
// dst = (src >>> shift) | (src << (C - shift)) with C the operand width
// (64/32) or 0.  RORV/RORVW take the shift modulo the register width, so
// both constant choices are a rotate right by `shift`.  Each rule expands
// into the corresponding ror expander above.

// ror long, rotate amount expressed as (64 - shift)
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// ror long, rotate amount expressed as (0 - shift)
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// ror int, rotate amount expressed as (32 - shift)
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// ror int, rotate amount expressed as (0 - shift)
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11669 
11670 // Add/subtract (extended)
11671 
// Add a sign-extended int to a long: AddL(src1, ConvI2L(src2)) maps onto a
// single add-with-extended-register, "add Xd, Xn, Wm, sxtw".
// Fixed: removed the stray ';' after the closing '%}', which no other rule
// in this file carries.  (This region is marked auto-generated — see the
// END marker further down — so the same fix belongs in the generator.)
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

  ins_encode %{
    __ add(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}
  ins_pipe(ialu_reg_reg);
%}
11684 
// Subtract a sign-extended int from a long: SubL(src1, ConvI2L(src2)) maps
// onto a single "sub Xd, Xn, Wm, sxtw" (extended-register form).
// Fixed: removed the stray ';' after the closing '%}' for consistency with
// the rest of the file.  (Auto-generated region — mirror the fix in the
// generator.)
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}
  ins_pipe(ialu_reg_reg);
%}
11697 
11698 
// Add of a sub-word operand narrowed by a shift pair:
// (x << k) >> k sign-extends and (x << k) >>> k zero-extends the low
// (width - k) bits, so AddI/AddL of such a term maps onto the
// add-with-extended-register forms (sxtb/sxth/sxtw/uxtb).
// The lshift/rshift immediates pin k to the extension width.
// (Auto-generated region — see the END marker further down.)

// int + sign-extended short (k = 16)
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + sign-extended byte (k = 24)
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + zero-extended byte (k = 24, logical right shift)
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + sign-extended short (k = 48)
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + sign-extended int (k = 32)
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + sign-extended byte (k = 56)
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + zero-extended byte (k = 56, logical right shift)
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11789 
11790 
// Add/subtract of a masked (zero-extended) operand: an AndI/AndL with mask
// 0xff, 0xffff or 0xffffffff is a zero extension, so it folds into the
// uxtb/uxth/uxtw extended-register forms of add/sub (addw/subw for int
// results, add/sub for long).
// (Auto-generated region — see the END marker further down.)

// int + (src2 & 0xff)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + (src2 & 0xffff)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xff)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xffff)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xffffffff)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int - (src2 & 0xff)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int - (src2 & 0xffff)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xff)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xffff)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xffffffff)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11920 
11921 
// Add/subtract of a sign-extended operand that is additionally left-shifted
// by a small constant: the extended-register form accepts an extension
// shift, so the whole pattern collapses into one instruction, e.g.
// "add Xd, Xn, Xm, sxtb #lshift2".  immIExt presumably restricts lshift2
// to the range the extended-register encoding allows — confirm against the
// operand definition earlier in the file.
// (Auto-generated region — see the END marker further down.)

// long + (sign-extended byte << lshift2)
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + (sign-extended short << lshift2)
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + (sign-extended int << lshift2)
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - (sign-extended byte << lshift2)
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - (sign-extended short << lshift2)
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - (sign-extended int << lshift2)
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + (sign-extended byte << lshift2)
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + (sign-extended short << lshift2)
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int - (sign-extended byte << lshift2)
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int - (sign-extended short << lshift2)
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12051 
12052 
// Add a sign-extended-and-shifted int to a long:
// AddL(src1, LShiftL(ConvI2L(src2), lshift)) maps onto a single
// "add Xd, Xn, Wm, sxtw #lshift".  immIExt presumably restricts lshift to
// the extended-register encoding's range — confirm against the operand
// definition earlier in the file.
// Fixed: removed the stray ';' after the closing '%}' for consistency with
// the rest of the file.  (Auto-generated region — mirror in the generator.)
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

  ins_encode %{
    __ add(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
%}
12065 
// Subtract a sign-extended-and-shifted int from a long:
// SubL(src1, LShiftL(ConvI2L(src2), lshift)) maps onto a single
// "sub Xd, Xn, Wm, sxtw #lshift".
// Fixed: removed the stray ';' after the closing '%}' for consistency with
// the rest of the file.  (Auto-generated region — mirror in the generator.)
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
  %}
  ins_pipe(ialu_reg_reg_shift);
%}
12078 
12079 
// Add/subtract of a masked (zero-extended) operand that is additionally
// left-shifted by a small constant: AndI/AndL with mask 0xff/0xffff/
// 0xffffffff plus an LShift folds into the uxtb/uxth/uxtw extended-register
// form with an extension shift, e.g. "add Xd, Xn, Xm, uxtb #lshift".
// (Auto-generated region — see the END marker below.)

// long + ((src2 & 0xff) << lshift)
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + ((src2 & 0xffff) << lshift)
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + ((src2 & 0xffffffff) << lshift)
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((src2 & 0xff) << lshift)
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((src2 & 0xffff) << lshift)
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((src2 & 0xffffffff) << lshift)
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + ((src2 & 0xff) << lshift)
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + ((src2 & 0xffff) << lshift)
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int - ((src2 & 0xff) << lshift)
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int - ((src2 & 0xffff) << lshift)
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12209 // END This section of the file is automatically generated. Do not edit --------------
12210 
12211 // ============================================================================
12212 // Floating Point Arithmetic Instructions
12213 
// float add: fadds Sd, Sn, Sm
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double add: faddd Dd, Dn, Dm
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float subtract: fsubs Sd, Sn, Sm
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double subtract: fsubd Dd, Dn, Dm
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float multiply: fmuls Sd, Sn, Sm (slightly dearer than add/sub)
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double multiply: fmuld Dd, Dn, Dm
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12303 
// src1 * src2 + src3: fused multiply-add, only when -XX:+UseFMA
// (FmaF/FmaD put the addend first, so src3 is the first match operand).
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3 (double)
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12337 
// -src1 * src2 + src3: fused multiply-subtract (fmsub), only when UseFMA.
// Two match rules cover the negation landing on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3 (double)
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3: negated fused multiply-add (fnmadd), only when
// UseFMA; both the addend and one multiplicand carry the negation.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3 (double)
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12409 
// src1 * src2 - src3: fnmsub, only when UseFMA (matched as
// FmaF(NegF(src3), src1 * src2)).
// NOTE(review): the 'zero' operand appears in neither the match rule nor
// the encoding for these two rules and looks vestigial — confirm whether
// it can be dropped.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3 (double)
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12444 
12445 
// Math.max(FF)F intrinsic: single fmaxs instruction
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F intrinsic: single fmins instruction
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D intrinsic: single fmaxd instruction
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D intrinsic: single fmind instruction
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12501 
12502 
// Floating-point division. Costed well above INSN_COST to reflect the
// multi-cycle, non-pipelined divide unit (double is costlier than single).

// Single-precision divide: dst = src1 / src2
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12532 
// Floating-point negate (flips the sign bit).

// Single-precision: dst = -src
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision: dst = -src
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12560 
// Integer absolute value: compare against zero, then conditionally negate
// when negative (cneg with LT). The compare clobbers the flags, hence
// KILL cr.

// int abs: dst = (src < 0) ? -src : src
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// long abs: dst = (src < 0) ? -src : src
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
12594 
// Floating-point absolute value (clears the sign bit).

// float: dst = |src|
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// double: dst = |src|
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12620 
// Double-precision square root: dst = sqrt(src).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Double sqrt issues on the double-precision divide/sqrt pipe, matching
  // divD_reg_reg. (Was fp_div_s, the single-precision pipe — only a
  // scheduling-model inaccuracy, not a correctness problem.)
  ins_pipe(fp_div_d);
%}
12633 
// Single-precision square root. Matches the widen/sqrt/narrow idiom the
// compiler produces for (float) Math.sqrt((double) f) and folds it to a
// single fsqrts, which gives the same correctly-rounded result.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Single sqrt issues on the single-precision divide/sqrt pipe, matching
  // divF_reg_reg. (Was fp_div_d — only a scheduling-model inaccuracy.)
  ins_pipe(fp_div_s);
%}
12646 
// Math.copySign(double): dst gets the magnitude of src1 with the sign of
// src2. fnegd(dst, zero) negates the incoming 0.0 to produce -0.0, i.e. a
// register holding only the sign bit set; bsl then selects the sign bit
// from src2 and all remaining bits from src1. TEMP_DEF keeps dst disjoint
// from the inputs since it is written before they are consumed.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
12661 
// Math.copySign(float): dst gets the magnitude of src1 with the sign of
// src2. movi builds the sign-bit mask 0x80000000 in each 32-bit lane
// (0x80 shifted left 24); bsl then selects the sign bit from src2 and all
// remaining bits from src1.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
12675 
// Math.signum: returns +-1.0 copying src's sign, or src itself when src is
// +-0.0 or NaN. Implemented branch-free with a compare-generated mask and
// bit-select; the per-step comments below walk through the mask values.

instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
12717 
12718 // ============================================================================
12719 // Logical Instructions
12720 
12721 // Integer Logical Instructions
12722 
12723 // And Instructions
12724 
12725 
// int AND, register-register: dst = src1 & src2.
// NOTE(review): the rFlagsReg cr operand is declared but carries no
// effect() here and andw does not set flags — presumably a historical
// leftover; confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12740 
// int AND with a logical immediate: dst = src1 & src2 (immILog guarantees
// src2 is encodable as an AArch64 bitmask immediate).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // The encoding emits a plain, non-flag-setting "andw", so print that
  // rather than the previous "andsw" (debug-format text only).
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12755 
// Or Instructions

// int OR, register-register: dst = src1 | src2
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int OR with a logical (bitmask-encodable) immediate
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int XOR, register-register: dst = src1 ^ src2
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int XOR with a logical (bitmask-encodable) immediate
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12819 
12820 // Long Logical Instructions
12821 // TODO
12822 
// long AND, register-register: dst = src1 & src2.
// NOTE(review): rFlagsReg cr is declared without an effect() and andr does
// not set flags — presumably a historical leftover; confirm before removing.
// Format comments below corrected from "# int" to "# long" to match the
// 64-bit operations these rules implement.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long AND with a logical (bitmask-encodable) immediate
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// long OR, register-register: dst = src1 | src2
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long OR with a logical (bitmask-encodable) immediate
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// long XOR, register-register: dst = src1 ^ src2
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long XOR with a logical (bitmask-encodable) immediate
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12916 
// int -> long sign extension: sbfm with immr=0, imms=31 is the canonical
// encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (long) src & 0xFFFFFFFF, folded into a single
// zero-extend (ubfm 0, 31 == uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move keeps the low word and
// clears the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12955 
// Conv2B: dst = (src != 0) ? 1 : 0, via compare-with-zero plus cset.
// Both forms clobber the flags (KILL cr).

// int operand
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer operand (64-bit compare)
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12991 
// Floating-point <-> integer conversions. The fcvtz* forms convert to
// integer rounding toward zero, matching Java (float)/(double) -> int/long
// cast semantics; the scvtf* forms are signed integer -> FP conversions.
// (fcvtd/fcvts are the macroassembler's names for the D<->S precision
// conversions.)

// double -> float
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int (truncating)
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long (truncating)
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int (truncating)
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long (truncating)
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13121 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These implement the MoveF2I/MoveI2F/MoveD2L/MoveL2D bitcast nodes: the
// raw bits are moved unchanged between a stack slot and a register of the
// other register class. All stack accesses are sp-relative using the
// slot's displacement.

// stack float slot -> int register (bitwise)
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// stack int slot -> float register (bitwise)
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// stack double slot -> long register (bitwise)
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// stack long slot -> double register (bitwise)
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// float register -> stack int slot (bitwise)
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// int register -> stack float slot (bitwise)
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13231 
// double register -> stack long slot (bitwise). Format operand order fixed
// to "$src, $dst" to match the emitted store (src is stored into the dst
// slot) and the sibling Move*_reg_stack rules; debug text only.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13249 
// long register -> stack double slot (bitwise)
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Register-to-register bitcasts via fmov (no memory round trip).

// float register -> int register (bitwise)
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int register -> float register (bitwise)
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double register -> long register (bitwise)
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long register -> double register (bitwise)
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13339 
13340 // ============================================================================
13341 // clearing of an array
13342 
13343 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13344 %{
13345   match(Set dummy (ClearArray cnt base));
13346   effect(USE_KILL cnt, USE_KILL base, KILL cr);
13347 
13348   ins_cost(4 * INSN_COST);
13349   format %{ "ClearArray $cnt, $base" %}
13350 
13351   ins_encode %{
13352     address tpc = __ zero_words($base$$Register, $cnt$$Register);
13353     if (tpc == NULL) {
13354       ciEnv::current()->record_failure("CodeCache is full");
13355       return;
13356     }
13357   %}
13358 
13359   ins_pipe(pipe_class_memory);
13360 %}
13361 
13362 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13363 %{
13364   predicate((u_int64_t)n->in(2)->get_long()
13365             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13366   match(Set dummy (ClearArray cnt base));
13367   effect(USE_KILL base);
13368 
13369   ins_cost(4 * INSN_COST);
13370   format %{ "ClearArray $cnt, $base" %}
13371 
13372   ins_encode %{
13373     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13374   %}
13375 
13376   ins_pipe(pipe_class_memory);
13377 %}
13378 
13379 // ============================================================================
13380 // Overflow Math Instructions
13381 
13382 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13383 %{
13384   match(Set cr (OverflowAddI op1 op2));
13385 
13386   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13387   ins_cost(INSN_COST);
13388   ins_encode %{
13389     __ cmnw($op1$$Register, $op2$$Register);
13390   %}
13391 
13392   ins_pipe(icmp_reg_reg);
13393 %}
13394 
13395 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13396 %{
13397   match(Set cr (OverflowAddI op1 op2));
13398 
13399   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13400   ins_cost(INSN_COST);
13401   ins_encode %{
13402     __ cmnw($op1$$Register, $op2$$constant);
13403   %}
13404 
13405   ins_pipe(icmp_reg_imm);
13406 %}
13407 
13408 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13409 %{
13410   match(Set cr (OverflowAddL op1 op2));
13411 
13412   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13413   ins_cost(INSN_COST);
13414   ins_encode %{
13415     __ cmn($op1$$Register, $op2$$Register);
13416   %}
13417 
13418   ins_pipe(icmp_reg_reg);
13419 %}
13420 
13421 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13422 %{
13423   match(Set cr (OverflowAddL op1 op2));
13424 
13425   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13426   ins_cost(INSN_COST);
13427   ins_encode %{
13428     __ cmn($op1$$Register, $op2$$constant);
13429   %}
13430 
13431   ins_pipe(icmp_reg_imm);
13432 %}
13433 
13434 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13435 %{
13436   match(Set cr (OverflowSubI op1 op2));
13437 
13438   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13439   ins_cost(INSN_COST);
13440   ins_encode %{
13441     __ cmpw($op1$$Register, $op2$$Register);
13442   %}
13443 
13444   ins_pipe(icmp_reg_reg);
13445 %}
13446 
13447 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13448 %{
13449   match(Set cr (OverflowSubI op1 op2));
13450 
13451   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13452   ins_cost(INSN_COST);
13453   ins_encode %{
13454     __ cmpw($op1$$Register, $op2$$constant);
13455   %}
13456 
13457   ins_pipe(icmp_reg_imm);
13458 %}
13459 
13460 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13461 %{
13462   match(Set cr (OverflowSubL op1 op2));
13463 
13464   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13465   ins_cost(INSN_COST);
13466   ins_encode %{
13467     __ cmp($op1$$Register, $op2$$Register);
13468   %}
13469 
13470   ins_pipe(icmp_reg_reg);
13471 %}
13472 
13473 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13474 %{
13475   match(Set cr (OverflowSubL op1 op2));
13476 
13477   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13478   ins_cost(INSN_COST);
13479   ins_encode %{
13480     __ cmp($op1$$Register, $op2$$constant);
13481   %}
13482 
13483   ins_pipe(icmp_reg_imm);
13484 %}
13485 
13486 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13487 %{
13488   match(Set cr (OverflowSubI zero op1));
13489 
13490   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13491   ins_cost(INSN_COST);
13492   ins_encode %{
13493     __ cmpw(zr, $op1$$Register);
13494   %}
13495 
13496   ins_pipe(icmp_reg_imm);
13497 %}
13498 
13499 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13500 %{
13501   match(Set cr (OverflowSubL zero op1));
13502 
13503   format %{ "cmp   zr, $op1\t# overflow check long" %}
13504   ins_cost(INSN_COST);
13505   ins_encode %{
13506     __ cmp(zr, $op1$$Register);
13507   %}
13508 
13509   ins_pipe(icmp_reg_imm);
13510 %}
13511 
// Multiply overflow checks. The product overflows iff the widened result
// is not the sign extension of its low half. The flag-producing forms must
// end with V set/clear, which the cmpw-against-1 trick manufactures
// (0x80000000 - 1 sets V; 0 - 1 does not); the branch forms test EQ/NE
// directly instead.

// int multiply overflow -> flags (V)
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// int multiply overflow fused with its consuming branch (overflow /
// no_overflow tests only); avoids materializing the V flag.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// long multiply overflow -> flags (V); smulh supplies the high 64 bits
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// long multiply overflow fused with its consuming branch
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13601 
13602 // ============================================================================
13603 // Compare Instructions
13604 
// Signed 32-bit compare, register vs register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 32-bit compare against zero (special-cased immediate form).
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an immediate that is directly encodable
// in an add/sub-style instruction (single cmpw).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an arbitrary immediate; costed higher
// because the encoding may need to materialize the constant first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13660 
13661 // Unsigned compare Instructions; really, same as signed compare
13662 // except it should only be used to feed an If or a CMovI which takes a
13663 // cmpOpU.
13664 
// Unsigned 32-bit compare, register vs register.  Same cmpw encoding
// as the signed form; the difference is only the rFlagsRegU result,
// which downstream consumers interpret with unsigned conditions.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned 32-bit compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 32-bit compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 32-bit compare against an arbitrary immediate (may need to
// materialize the constant, hence the higher cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13720 
// Signed 64-bit compare, register vs register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 64-bit compare against zero.
// NOTE(review): the format prints "tst" but the encoding used is the
// add/sub-immediate compare with a zero operand — confirm the format
// string matches the emitted instruction.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an arbitrary immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13776 
// Unsigned 64-bit compare, register vs register.  Same encodings as the
// signed CmpL forms; only the rFlagsRegU result type differs.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned 64-bit compare against zero.
// NOTE(review): format prints "tst" but the add/sub-immediate compare
// encoding is used — see compL_reg_immL0.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 64-bit compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 64-bit compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13832 
// Pointer compare, register vs register (unsigned flags: addresses
// compare as unsigned values).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register vs register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the null-pointer constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13888 
13889 // FP comparisons
13890 //
13891 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13892 // using normal cmpOp. See declaration of rFlagsReg for details.
13893 
// Float compare, register vs register: fcmps sets the normal flags
// (see file comment above about CmpF using a plain rFlagsReg).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 (uses the fcmps-with-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register vs register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13950 
// Three-way float compare (CmpF3): produce -1 / 0 / +1 in an int
// register.  After fcmps, csinvw writes 0 on EQ and -1 otherwise;
// csnegw then keeps -1 on LT (which includes the unordered case for
// fcmps) and negates it to +1 otherwise.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is never branched to; the label appears to be
    // vestigial from an earlier branching implementation.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare (CmpD3): same -1 / 0 / +1 sequence as
// compF3_reg_reg, using fcmpd.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): unused label, as in compF3_reg_reg.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): unused label, as in compF3_reg_reg.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): unused label, as in compF3_reg_reg.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14058 
// CmpLTMask p q: produce all-ones (-1) if p < q (signed), else 0.
// csetw materializes 0/1 from the LT flag; subtracting it from zr turns
// the 1 into -1 (0 stays 0).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask src 0: src < 0 iff its sign bit is set, so an arithmetic
// shift right by 31 replicates the sign bit into the whole mask —
// one instruction, no flags touched.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14095 
14096 // ============================================================================
14097 // Max and Min
14098 
// Conditional select on LT.  No match rule: this is an expand-only
// helper used by minI_rReg below.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI via compare + conditional select: cmpw then pick src1 when LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
// FROM HERE

// Conditional select on GT.  Expand-only helper for maxI_rReg below.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI via compare + conditional select: cmpw then pick src1 when GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
14157 
14158 // ============================================================================
14159 // Branch Instructions
14160 
14161 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional near branch on signed flags.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional near branch on unsigned flags.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14217 
14218 // Make use of CBZ and CBNZ.  These instructions, as well as being
14219 // shorter than (cmp; branch), have the additional benefit of not
14220 // killing the flags.
14221 
// Compare-against-zero + branch fused into CBZ/CBNZ (see the section
// comment above: shorter than cmp;b.cond and leaves the flags intact).
// The cr operand is listed but never written — the whole point is that
// these forms do not clobber flags.

// int == 0 / != 0 branch.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// long == 0 / != 0 branch.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// pointer == null / != null branch.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// compressed oop == null / != null branch (32-bit test).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null test of a DecodeN'd oop: a narrow oop decodes to null iff the
// narrow value itself is zero, so test the undecoded register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int vs 0 branch.  Against zero, unsigned "<=" (LS) can only
// hold when the value is 0, so EQ and LS both map to cbzw; NE and the
// remaining conditions map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long vs 0 branch; same condition mapping as cmpUI_imm0_branch.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14340 
14341 // Test bit and Branch
14342 
14343 // Patterns for short (< 32KiB) variants
// Sign test of a long feeding a branch: x < 0 iff bit 63 is set, so
// LT becomes tbnz (NE on the bit) and GE becomes tbz (EQ on the bit).
// Short variant: tbz/tbnz reach only +-32KiB.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int feeding a branch: test bit 31.  Short variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (x & single-bit-mask) ==/!= 0 branch on a long: test exactly that
// bit with tbz/tbnz.  The predicate guarantees the mask is a power of
// two so exact_log2 yields the bit index.  Short variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int version of the single-bit test-and-branch.  Short variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14409 
14410 // And far variants
// Far variants of the tbz/tbnz patterns above: same matching, but tbr
// is told the target may be out of tbz range so it can emit an
// inverted test around an unconditional branch.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant: int sign-bit test and branch.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant: long single-bit test and branch.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant: int single-bit test and branch.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14472 
14473 // Test bits
14474 
// (x & mask) compared with 0, producing flags: fold the AndL/AndI into
// a single tst/tstw so no intermediate register is needed.

// Long tst with an immediate mask; the predicate restricts the mask to
// those encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int tstw with an immediate mask (32-bit logical-immediate encodable).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Long tst with a register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int tstw with a register mask.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14522 
14523 
14524 // Conditional Far Branch
14525 // Conditional Far Branch Unsigned
14526 // TODO: fixme
14527 
14528 // counted loop end branch near
// Counted-loop back-branch, signed flags.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted-loop back-branch, unsigned flags.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14561 
14562 // counted loop end branch far
14563 // counted loop end branch far unsigned
14564 // TODO: fixme
14565 
14566 // ============================================================================
14567 // inlined locking and unlocking
14568 
// Inline monitor enter (FastLock): sets cr for the slow-path test and
// clobbers the two temp registers used by the locking stub.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inline monitor exit (FastUnlock); mirrors cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14596 
14597 
14598 // ============================================================================
14599 // Safepoint Instructions
14600 
14601 // TODO
14602 // provide a near and far version of this code
14603 
// Safepoint poll: load from the polling page; the VM arms the page to
// fault here when a safepoint is requested.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14617 
14618 
14619 // ============================================================================
14620 // Procedure Call/Return Instructions
14621 
14622 // Call Java Static Instruction
14623 
instruct CallStaticJavaDirect(method meth)
%{
  // Direct (statically bound) Java call; meth is the callee method constant.
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  // Emit the static call stub plus the shared call epilog.
  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14639 
14640 // TO HERE
14641 
14642 // Call Java Dynamic Instruction
instruct CallDynamicJavaDirect(method meth)
%{
  // Dynamically dispatched (inline-cache) Java call.
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  // Emit the dynamic (IC) call sequence plus the shared call epilog.
  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14658 
14659 // Call Runtime Instruction
14660 
instruct CallRuntimeDirect(method meth)
%{
  // Call from compiled Java code into the VM runtime.
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14675 
// Call Runtime Leaf Instruction (no safepoint; may use FP registers)
14677 
instruct CallLeafDirect(method meth)
%{
  // Leaf runtime call: shares the java_to_runtime encoding with
  // CallRuntimeDirect; only the matched ideal node differs.
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14692 
// Call Runtime Leaf (no FP) Instruction
14694 
instruct CallLeafNoFPDirect(method meth)
%{
  // Leaf runtime call that does not touch FP state; same encoding as the
  // other runtime calls, distinguished only by the matched node.
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14709 
14710 // Tail Call; Jump from runtime stub to Java code.
14711 // Also known as an 'interprocedural jump'.
14712 // Target of jump will eventually return to caller.
14713 // TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  // Indirect tail call (interprocedural jump); method_oop is kept live in
  // the inline-cache register for the callee.
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14726 
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  // Indirect tail jump used for exception forwarding; the exception oop
  // is pinned in r0 (iRegP_R0) for the handler.
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14739 
14740 // Create exception oop: created by stack-crawling runtime code.
14741 // Created exception is now available to this handler, and is setup
14742 // just prior to jumping to this handler. No code emitted.
14743 // TODO check
14744 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  // Marker node only: the runtime has already placed the exception oop in
  // r0 before jumping here, so no instructions are emitted (size 0).
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14757 
14758 // Rethrow exception: The exception oop will come in the first
14759 // argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  // Jump (not call) to the rethrow stub; the exception oop is already in
  // the first argument register per the comment above.
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14770 
14771 
14772 // Return Instruction
14773 // epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  // Method return; the epilog has already restored lr (see comment above).
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14784 
14785 // Die now.
instruct ShouldNotReachHere() %{
  // Halt: emitted on paths that must never execute at runtime.
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // Only emit the trap if the block is actually reachable after
    // code layout; 0xdead + 1 is the immediate tagging the trap site.
    // NOTE: the mnemonic is DCPS1 (Debug Change PE State), not "dpcs1" —
    // the previous spelling named a non-existent assembler function.
    if (is_reachable()) {
      __ dcps1(0xdead + 1);
    }
  %}

  ins_pipe(pipe_class_default);
%}
14800 
14801 // ============================================================================
14802 // Partial Subtype Check
14803 //
// Search the sub klass's secondary-supers (superklass) array for the
// given superklass.  Set a hidden internal cache on a hit (cache is
// checked with exposed code in gen_subtype_check()).  Return NZ for a
// miss or zero for a hit.  The encoding ALSO sets flags.
14808 
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  // Slow-path subtype check; operands are pinned to the registers the
  // stub expects (sub=r4, super=r0, temp=r2, result=r5).
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14823 
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  // Fused form: the check's result is only compared against zero, so just
  // the flags are produced and the result register is treated as scratch.
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14838 
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  // String.compareTo intrinsic, both strings UTF-16 (UU encoding).
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  // Per the effect list, tmp2 and cr are clobbered as well as tmp1.
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // fnoreg x3: this encoding needs no vector temporaries.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14856 
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  // String.compareTo intrinsic, both strings Latin-1 (LL encoding).
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  // Per the effect list, tmp2 and cr are clobbered as well as tmp1.
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // fnoreg x3: this encoding needs no vector temporaries.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14873 
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  // String.compareTo intrinsic, str1 UTF-16 vs str2 Latin-1 (UL); the
  // mixed-width comparison uses three vector temporaries for inflation.
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14893 
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  // String.compareTo intrinsic, str1 Latin-1 vs str2 UTF-16 (LU); mirror
  // of string_compareUL, with three vector temporaries for inflation.
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    // Fixed ",StrIntrinsicNode" spacing to match the sibling UL variant.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14913 
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  // String.indexOf intrinsic with a variable-length needle, both UTF-16.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    // -1 means the needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14934 
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  // String.indexOf intrinsic with a variable-length needle, both Latin-1.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    // -1 means the needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14955 
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  // String.indexOf intrinsic with a variable-length needle,
  // UTF-16 haystack / Latin-1 needle (UL).
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    // -1 means the needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14976 
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  // String.indexOf with a constant needle length <= 4 (immI_le_4), both
  // UTF-16; needs fewer temporaries than the variable-length form.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    // The constant needle length is passed directly; zr fills the unused
    // register slots of the variable-length signature.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14997 
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  // String.indexOf with a constant needle length <= 4, both Latin-1.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    // Constant needle length; zr fills the unused register slots.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15018 
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  // String.indexOf, UL encoding, restricted to a needle of length
  // exactly 1 (immI_1 operand).
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    // Constant needle length; zr fills the unused register slots.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15039 
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  // String.indexOf(char) intrinsic: search a UTF-16 char array for a
  // single character value held in a register.
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15057 
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  // String.equals intrinsic for Latin-1 strings (element size 1 byte).
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15073 
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  // String.equals intrinsic for UTF-16 strings (element size 2 bytes).
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15089 
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  // Arrays.equals intrinsic for byte arrays (element size 1).
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // arrays_equals may emit a stub; NULL return means the code cache is
    // full, so record a bailout instead of continuing.
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
15110 
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  // Arrays.equals intrinsic for char arrays (element size 2).
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // NULL return from arrays_equals means the code cache is full; bail out.
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
15131 
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  // StringCoding.hasNegatives intrinsic: scan a byte array for any byte
  // with the sign bit set.
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    // NULL return means the code cache is full; record a bailout.
    address tpc = __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
15146 
15147 // fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  // char[] -> byte[] compression intrinsic; uses four vector temporaries.
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15165 
15166 // fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  // byte[] -> char[] inflation intrinsic; produces no value (Universe dummy).
  match(Set dummy (StrInflatedCopy src (Binary dst len)))
  ;
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    // NULL return means the code cache is full; record a bailout.
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                                        $tmp3$$FloatRegister, $tmp4$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
15185 
15186 // encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  // ISO-8859-1 encoding intrinsic: char[] -> byte[] with four vector
  // temporaries; result is the number of characters encoded.
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15204 
15205 // ============================================================================
15206 // This name is KNOWN by the ADLC and cannot be changed.
15207 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15208 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  // ThreadLocal: the current thread already lives in the dedicated thread
  // register (thread_RegP), so no code is emitted (size 0, cost 0).
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15223 
15224 // ====================VECTOR INSTRUCTIONS=====================================
15225 
15226 // Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  // Load a 32-bit vector into the low half of a D register (LDR Sn).
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15236 
15237 // Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  // Load a 64-bit vector (LDR Dn).
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15247 
15248 // Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  // Load a 128-bit vector (LDR Qn).
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15258 
15259 // Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  // Store the low 32 bits of a vector register (STR Sn).
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15269 
15270 // Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  // Store a 64-bit vector (STR Dn).
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15280 
15281 // Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  // Store a 128-bit vector (STR Qn).
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15291 
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  // Broadcast a GP register into the byte lanes of a 64-bit vector (DUP);
  // also covers 4-byte vectors per the predicate.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15304 
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  // Broadcast a GP register into all 16 byte lanes of a 128-bit vector.
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15316 
instruct replicate8B_imm(vecD dst, immI con)
%{
  // Broadcast an immediate into the byte lanes of a 64-bit vector (MOVI);
  // the constant is masked to its low 8 bits.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15329 
instruct replicate16B_imm(vecX dst, immI con)
%{
  // Broadcast an immediate (low 8 bits) into all 16 byte lanes.
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15341 
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  // Broadcast a GP register into the 16-bit lanes of a 64-bit vector;
  // also covers 2-lane vectors per the predicate.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15354 
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  // Broadcast a GP register into all eight 16-bit lanes of a 128-bit vector.
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15366 
instruct replicate4S_imm(vecD dst, immI con)
%{
  // Broadcast an immediate (low 16 bits) into the 16-bit lanes of a
  // 64-bit vector.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15379 
instruct replicate8S_imm(vecX dst, immI con)
%{
  // Broadcast an immediate (low 16 bits) into all eight 16-bit lanes.
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15391 
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  // Broadcast a GP register into both 32-bit lanes of a 64-bit vector.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15403 
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  // Broadcast a GP register into all four 32-bit lanes of a 128-bit vector.
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15415 
instruct replicate2I_imm(vecD dst, immI con)
%{
  // Broadcast an immediate into both 32-bit lanes of a 64-bit vector.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15427 
15428 instruct replicate4I_imm(vecX dst, immI con)
15429 %{
15430   predicate(n->as_Vector()->length() == 4);
15431   match(Set dst (ReplicateI con));
15432   ins_cost(INSN_COST);
15433   format %{ "movi  $dst, $con\t# vector(4I)" %}
15434   ins_encode %{
15435     __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
15436   %}
15437   ins_pipe(vmovi_reg_imm128);
15438 %}
15439 
15440 instruct replicate2L(vecX dst, iRegL src)
15441 %{
15442   predicate(n->as_Vector()->length() == 2);
15443   match(Set dst (ReplicateL src));
15444   ins_cost(INSN_COST);
15445   format %{ "dup  $dst, $src\t# vector (2L)" %}
15446   ins_encode %{
15447     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
15448   %}
15449   ins_pipe(vdup_reg_reg128);
15450 %}
15451 
15452 instruct replicate2L_zero(vecX dst, immI0 zero)
15453 %{
15454   predicate(n->as_Vector()->length() == 2);
15455   match(Set dst (ReplicateI zero));
15456   ins_cost(INSN_COST);
15457   format %{ "movi  $dst, $zero\t# vector(4I)" %}
15458   ins_encode %{
15459     __ eor(as_FloatRegister($dst$$reg), __ T16B,
15460            as_FloatRegister($dst$$reg),
15461            as_FloatRegister($dst$$reg));
15462   %}
15463   ins_pipe(vmovi_reg_imm128);
15464 %}
15465 
15466 instruct replicate2F(vecD dst, vRegF src)
15467 %{
15468   predicate(n->as_Vector()->length() == 2);
15469   match(Set dst (ReplicateF src));
15470   ins_cost(INSN_COST);
15471   format %{ "dup  $dst, $src\t# vector (2F)" %}
15472   ins_encode %{
15473     __ dup(as_FloatRegister($dst$$reg), __ T2S,
15474            as_FloatRegister($src$$reg));
15475   %}
15476   ins_pipe(vdup_reg_freg64);
15477 %}
15478 
15479 instruct replicate4F(vecX dst, vRegF src)
15480 %{
15481   predicate(n->as_Vector()->length() == 4);
15482   match(Set dst (ReplicateF src));
15483   ins_cost(INSN_COST);
15484   format %{ "dup  $dst, $src\t# vector (4F)" %}
15485   ins_encode %{
15486     __ dup(as_FloatRegister($dst$$reg), __ T4S,
15487            as_FloatRegister($src$$reg));
15488   %}
15489   ins_pipe(vdup_reg_freg128);
15490 %}
15491 
15492 instruct replicate2D(vecX dst, vRegD src)
15493 %{
15494   predicate(n->as_Vector()->length() == 2);
15495   match(Set dst (ReplicateD src));
15496   ins_cost(INSN_COST);
15497   format %{ "dup  $dst, $src\t# vector (2D)" %}
15498   ins_encode %{
15499     __ dup(as_FloatRegister($dst$$reg), __ T2D,
15500            as_FloatRegister($src$$reg));
15501   %}
15502   ins_pipe(vdup_reg_dreg128);
15503 %}
15504 
15505 // ====================REDUCTION ARITHMETIC====================================
15506 
// Add-reduce a 2I vector into a scalar: extract both lanes to GPRs with
// UMOV, then fold them into src1 with two scalar 32-bit adds.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $tmp, $src1, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($tmp$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4I vector: a single ADDV cross-lane add collapses all four
// lanes into lane 0 of tmp, which is then moved to a GPR and added to src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2I vector: dst = lane1 * (lane0 * src1), done entirely
// with scalar multiplies.  dst is a TEMP because it is written before the
// final operand read.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 4I vector: INS copies the high 64 bits of src2 down into
// tmp, MULV then forms the pairwise products (lane0*lane2, lane1*lane3) in
// one 2S multiply, and the two partial products are folded with src1 using
// scalar multiplies.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15587 
// Add-reduce a 2F vector: dst = (src1 + lane0) + lane1.  The lanes are
// accumulated strictly in order — FP addition must not be reassociated, so
// no cross-lane FADDP/FADDV shortcut is used.  INS moves lane 1 of src2 into
// lane 0 of tmp so the scalar fadds can reach it.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4F vector: same strictly-ordered scheme as reduce_add2F,
// extended over lanes 1..3 (each INS pulls the next lane down to position 0).
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2F vector: dst = (src1 * lane0) * lane1, strictly ordered.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 4F vector: strictly-ordered scalar multiplies across
// lanes 0..3, mirroring reduce_add4F.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 2D vector: dst = (src1 + lane0) + lane1, strictly ordered.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2D vector: dst = (src1 * lane0) * lane1, strictly ordered.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15731 
// Max-reduce a 2F vector with scalar fmaxs over src1 and both lanes.  The
// element-type predicate disambiguates against the double rule (both match
// the generic MaxReductionV node).  dst is TEMP_DEF: written before src2 is
// fully consumed.
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduce a 4F vector: one cross-lane FMAXV collapses the vector, then a
// scalar fmaxs folds in src1 (max, unlike add, is associative so the
// cross-lane instruction is safe to use here).
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduce a 2D vector: scalar fmaxd over src1 and both lanes (there is no
// FMAXV arrangement for 2D).
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce a 2F vector: mirror of reduce_max2F using fmins.
instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce a 4F vector: cross-lane FMINV then fold in src1 with fmins.
instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $src2\n\t"
            "fmins $dst, $dst, $src1\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce a 2D vector: mirror of reduce_max2D using fmind.
instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15823 
15824 // ====================VECTOR ARITHMETIC=======================================
15825 
15826 // --------------------------------- ADD --------------------------------------
15827 
// Elementwise byte add, 64-bit vector.  Length 4 (a half-full D register)
// shares this rule with length 8.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Elementwise byte add, 128-bit vector (16 lanes).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise short add, 64-bit vector (lengths 2 and 4 both fit in D).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Elementwise short add, 128-bit vector (8 lanes).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise int add, 64-bit vector (2 lanes).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Elementwise int add, 128-bit vector (4 lanes).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise long add, 128-bit vector (2 lanes).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise float add, 64-bit vector (2 lanes).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Elementwise float add, 128-bit vector (4 lanes).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Elementwise double add, 128-bit vector.  No predicate needed: 2D is the
// only double-vector shape that fits a vecX operand.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15968 
15969 // --------------------------------- SUB --------------------------------------
15970 
// Elementwise byte subtract, 64-bit vector (lengths 4 and 8 share the rule).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Elementwise byte subtract, 128-bit vector (16 lanes).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise short subtract, 64-bit vector (lengths 2 and 4 share the rule).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Elementwise short subtract, 128-bit vector (8 lanes).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise int subtract, 64-bit vector (2 lanes).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Elementwise int subtract, 128-bit vector (4 lanes).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise long subtract, 128-bit vector (2 lanes).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Elementwise float subtract, 64-bit vector (2 lanes).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Elementwise float subtract, 128-bit vector (4 lanes).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Elementwise double subtract, 128-bit vector (2 lanes).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16112 
16113 // --------------------------------- MUL --------------------------------------
16114 
// Elementwise byte multiply, 64-bit vector (lengths 4 and 8 share the rule).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Elementwise byte multiply, 128-bit vector (16 lanes).
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Elementwise short multiply, 64-bit vector (lengths 2 and 4 share the rule).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Elementwise short multiply, 128-bit vector (8 lanes).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Elementwise int multiply, 64-bit vector (2 lanes).
// Note: no MulVL rules here — NEON has no 64x64-bit lane multiply.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Elementwise int multiply, 128-bit vector (4 lanes).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Elementwise float multiply, 64-bit vector (2 lanes).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Elementwise float multiply, 128-bit vector (4 lanes).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Elementwise double multiply, 128-bit vector (2 lanes).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16242 
16243 // --------------------------------- MLA --------------------------------------
16244 
// Integer multiply-accumulate: dst += src1 * src2.  The match pattern fuses
// an AddV whose addend is dst itself with a MulV into one MLA instruction;
// dst is both accumulator input and result.

// Short MLA, 64-bit vector (lengths 2 and 4 share the rule).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Short MLA, 128-bit vector (8 lanes).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Int MLA, 64-bit vector (2 lanes).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Int MLA, 128-bit vector (4 lanes).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16301 
16302 // dst + src1 * src2
16303 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
16304   predicate(UseFMA && n->as_Vector()->length() == 2);
16305   match(Set dst (FmaVF  dst (Binary src1 src2)));
16306   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
16307   ins_cost(INSN_COST);
16308   ins_encode %{
16309     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
16310             as_FloatRegister($src1$$reg),
16311             as_FloatRegister($src2$$reg));
16312   %}
16313   ins_pipe(vmuldiv_fp64);
16314 %}
16315 
16316 // dst + src1 * src2
16317 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
16318   predicate(UseFMA && n->as_Vector()->length() == 4);
16319   match(Set dst (FmaVF  dst (Binary src1 src2)));
16320   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
16321   ins_cost(INSN_COST);
16322   ins_encode %{
16323     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
16324             as_FloatRegister($src1$$reg),
16325             as_FloatRegister($src2$$reg));
16326   %}
16327   ins_pipe(vmuldiv_fp128);
16328 %}
16329 
16330 // dst + src1 * src2
16331 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
16332   predicate(UseFMA && n->as_Vector()->length() == 2);
16333   match(Set dst (FmaVD  dst (Binary src1 src2)));
16334   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
16335   ins_cost(INSN_COST);
16336   ins_encode %{
16337     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
16338             as_FloatRegister($src1$$reg),
16339             as_FloatRegister($src2$$reg));
16340   %}
16341   ins_pipe(vmuldiv_fp128);
16342 %}
16343 
16344 // --------------------------------- MLS --------------------------------------
16345 
16346 instruct vmls4S(vecD dst, vecD src1, vecD src2)
16347 %{
16348   predicate(n->as_Vector()->length() == 2 ||
16349             n->as_Vector()->length() == 4);
16350   match(Set dst (SubVS dst (MulVS src1 src2)));
16351   ins_cost(INSN_COST);
16352   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
16353   ins_encode %{
16354     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
16355             as_FloatRegister($src1$$reg),
16356             as_FloatRegister($src2$$reg));
16357   %}
16358   ins_pipe(vmla64);
16359 %}
16360 
16361 instruct vmls8S(vecX dst, vecX src1, vecX src2)
16362 %{
16363   predicate(n->as_Vector()->length() == 8);
16364   match(Set dst (SubVS dst (MulVS src1 src2)));
16365   ins_cost(INSN_COST);
16366   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
16367   ins_encode %{
16368     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
16369             as_FloatRegister($src1$$reg),
16370             as_FloatRegister($src2$$reg));
16371   %}
16372   ins_pipe(vmla128);
16373 %}
16374 
16375 instruct vmls2I(vecD dst, vecD src1, vecD src2)
16376 %{
16377   predicate(n->as_Vector()->length() == 2);
16378   match(Set dst (SubVI dst (MulVI src1 src2)));
16379   ins_cost(INSN_COST);
16380   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
16381   ins_encode %{
16382     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
16383             as_FloatRegister($src1$$reg),
16384             as_FloatRegister($src2$$reg));
16385   %}
16386   ins_pipe(vmla64);
16387 %}
16388 
16389 instruct vmls4I(vecX dst, vecX src1, vecX src2)
16390 %{
16391   predicate(n->as_Vector()->length() == 4);
16392   match(Set dst (SubVI dst (MulVI src1 src2)));
16393   ins_cost(INSN_COST);
16394   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
16395   ins_encode %{
16396     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
16397             as_FloatRegister($src1$$reg),
16398             as_FloatRegister($src2$$reg));
16399   %}
16400   ins_pipe(vmla128);
16401 %}
16402 
16403 // dst - src1 * src2
16404 instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
16405   predicate(UseFMA && n->as_Vector()->length() == 2);
16406   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
16407   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
16408   format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
16409   ins_cost(INSN_COST);
16410   ins_encode %{
16411     __ fmls(as_FloatRegister($dst$$reg), __ T2S,
16412             as_FloatRegister($src1$$reg),
16413             as_FloatRegister($src2$$reg));
16414   %}
16415   ins_pipe(vmuldiv_fp64);
16416 %}
16417 
16418 // dst - src1 * src2
16419 instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
16420   predicate(UseFMA && n->as_Vector()->length() == 4);
16421   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
16422   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
16423   format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
16424   ins_cost(INSN_COST);
16425   ins_encode %{
16426     __ fmls(as_FloatRegister($dst$$reg), __ T4S,
16427             as_FloatRegister($src1$$reg),
16428             as_FloatRegister($src2$$reg));
16429   %}
16430   ins_pipe(vmuldiv_fp128);
16431 %}
16432 
16433 // dst - src1 * src2
16434 instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
16435   predicate(UseFMA && n->as_Vector()->length() == 2);
16436   match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
16437   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
16438   format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
16439   ins_cost(INSN_COST);
16440   ins_encode %{
16441     __ fmls(as_FloatRegister($dst$$reg), __ T2D,
16442             as_FloatRegister($src1$$reg),
16443             as_FloatRegister($src2$$reg));
16444   %}
16445   ins_pipe(vmuldiv_fp128);
16446 %}
16447 
16448 // --------------------------------- DIV --------------------------------------
16449 
16450 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
16451 %{
16452   predicate(n->as_Vector()->length() == 2);
16453   match(Set dst (DivVF src1 src2));
16454   ins_cost(INSN_COST);
16455   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
16456   ins_encode %{
16457     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
16458             as_FloatRegister($src1$$reg),
16459             as_FloatRegister($src2$$reg));
16460   %}
16461   ins_pipe(vmuldiv_fp64);
16462 %}
16463 
16464 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
16465 %{
16466   predicate(n->as_Vector()->length() == 4);
16467   match(Set dst (DivVF src1 src2));
16468   ins_cost(INSN_COST);
16469   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
16470   ins_encode %{
16471     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
16472             as_FloatRegister($src1$$reg),
16473             as_FloatRegister($src2$$reg));
16474   %}
16475   ins_pipe(vmuldiv_fp128);
16476 %}
16477 
16478 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16479 %{
16480   predicate(n->as_Vector()->length() == 2);
16481   match(Set dst (DivVD src1 src2));
16482   ins_cost(INSN_COST);
16483   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16484   ins_encode %{
16485     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16486             as_FloatRegister($src1$$reg),
16487             as_FloatRegister($src2$$reg));
16488   %}
16489   ins_pipe(vmuldiv_fp128);
16490 %}
16491 
16492 // --------------------------------- SQRT -------------------------------------
16493 
16494 instruct vsqrt2D(vecX dst, vecX src)
16495 %{
16496   predicate(n->as_Vector()->length() == 2);
16497   match(Set dst (SqrtVD src));
16498   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
16499   ins_encode %{
16500     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
16501              as_FloatRegister($src$$reg));
16502   %}
16503   ins_pipe(vsqrt_fp128);
16504 %}
16505 
16506 // --------------------------------- ABS --------------------------------------
16507 
16508 instruct vabs8B(vecD dst, vecD src)
16509 %{
16510   predicate(n->as_Vector()->length() == 4 ||
16511             n->as_Vector()->length() == 8);
16512   match(Set dst (AbsVB src));
16513   ins_cost(INSN_COST);
16514   format %{ "abs  $dst, $src\t# vector (8B)" %}
16515   ins_encode %{
16516     __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
16517   %}
16518   ins_pipe(vlogical64);
16519 %}
16520 
16521 instruct vabs16B(vecX dst, vecX src)
16522 %{
16523   predicate(n->as_Vector()->length() == 16);
16524   match(Set dst (AbsVB src));
16525   ins_cost(INSN_COST);
16526   format %{ "abs  $dst, $src\t# vector (16B)" %}
16527   ins_encode %{
16528     __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
16529   %}
16530   ins_pipe(vlogical128);
16531 %}
16532 
16533 instruct vabs4S(vecD dst, vecD src)
16534 %{
16535   predicate(n->as_Vector()->length() == 4);
16536   match(Set dst (AbsVS src));
16537   ins_cost(INSN_COST);
16538   format %{ "abs  $dst, $src\t# vector (4H)" %}
16539   ins_encode %{
16540     __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
16541   %}
16542   ins_pipe(vlogical64);
16543 %}
16544 
16545 instruct vabs8S(vecX dst, vecX src)
16546 %{
16547   predicate(n->as_Vector()->length() == 8);
16548   match(Set dst (AbsVS src));
16549   ins_cost(INSN_COST);
16550   format %{ "abs  $dst, $src\t# vector (8H)" %}
16551   ins_encode %{
16552     __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
16553   %}
16554   ins_pipe(vlogical128);
16555 %}
16556 
16557 instruct vabs2I(vecD dst, vecD src)
16558 %{
16559   predicate(n->as_Vector()->length() == 2);
16560   match(Set dst (AbsVI src));
16561   ins_cost(INSN_COST);
16562   format %{ "abs  $dst, $src\t# vector (2S)" %}
16563   ins_encode %{
16564     __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
16565   %}
16566   ins_pipe(vlogical64);
16567 %}
16568 
16569 instruct vabs4I(vecX dst, vecX src)
16570 %{
16571   predicate(n->as_Vector()->length() == 4);
16572   match(Set dst (AbsVI src));
16573   ins_cost(INSN_COST);
16574   format %{ "abs  $dst, $src\t# vector (4S)" %}
16575   ins_encode %{
16576     __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
16577   %}
16578   ins_pipe(vlogical128);
16579 %}
16580 
16581 instruct vabs2L(vecX dst, vecX src)
16582 %{
16583   predicate(n->as_Vector()->length() == 2);
16584   match(Set dst (AbsVL src));
16585   ins_cost(INSN_COST);
16586   format %{ "abs  $dst, $src\t# vector (2D)" %}
16587   ins_encode %{
16588     __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
16589   %}
16590   ins_pipe(vlogical128);
16591 %}
16592 
16593 instruct vabs2F(vecD dst, vecD src)
16594 %{
16595   predicate(n->as_Vector()->length() == 2);
16596   match(Set dst (AbsVF src));
16597   ins_cost(INSN_COST * 3);
16598   format %{ "fabs  $dst,$src\t# vector (2S)" %}
16599   ins_encode %{
16600     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
16601             as_FloatRegister($src$$reg));
16602   %}
16603   ins_pipe(vunop_fp64);
16604 %}
16605 
16606 instruct vabs4F(vecX dst, vecX src)
16607 %{
16608   predicate(n->as_Vector()->length() == 4);
16609   match(Set dst (AbsVF src));
16610   ins_cost(INSN_COST * 3);
16611   format %{ "fabs  $dst,$src\t# vector (4S)" %}
16612   ins_encode %{
16613     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
16614             as_FloatRegister($src$$reg));
16615   %}
16616   ins_pipe(vunop_fp128);
16617 %}
16618 
16619 instruct vabs2D(vecX dst, vecX src)
16620 %{
16621   predicate(n->as_Vector()->length() == 2);
16622   match(Set dst (AbsVD src));
16623   ins_cost(INSN_COST * 3);
16624   format %{ "fabs  $dst,$src\t# vector (2D)" %}
16625   ins_encode %{
16626     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
16627             as_FloatRegister($src$$reg));
16628   %}
16629   ins_pipe(vunop_fp128);
16630 %}
16631 
16632 // --------------------------------- NEG --------------------------------------
16633 
16634 instruct vneg2F(vecD dst, vecD src)
16635 %{
16636   predicate(n->as_Vector()->length() == 2);
16637   match(Set dst (NegVF src));
16638   ins_cost(INSN_COST * 3);
16639   format %{ "fneg  $dst,$src\t# vector (2S)" %}
16640   ins_encode %{
16641     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
16642             as_FloatRegister($src$$reg));
16643   %}
16644   ins_pipe(vunop_fp64);
16645 %}
16646 
16647 instruct vneg4F(vecX dst, vecX src)
16648 %{
16649   predicate(n->as_Vector()->length() == 4);
16650   match(Set dst (NegVF src));
16651   ins_cost(INSN_COST * 3);
16652   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16653   ins_encode %{
16654     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16655             as_FloatRegister($src$$reg));
16656   %}
16657   ins_pipe(vunop_fp128);
16658 %}
16659 
16660 instruct vneg2D(vecX dst, vecX src)
16661 %{
16662   predicate(n->as_Vector()->length() == 2);
16663   match(Set dst (NegVD src));
16664   ins_cost(INSN_COST * 3);
16665   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16666   ins_encode %{
16667     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16668             as_FloatRegister($src$$reg));
16669   %}
16670   ins_pipe(vunop_fp128);
16671 %}
16672 
16673 // --------------------------------- AND --------------------------------------
16674 
16675 instruct vand8B(vecD dst, vecD src1, vecD src2)
16676 %{
16677   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16678             n->as_Vector()->length_in_bytes() == 8);
16679   match(Set dst (AndV src1 src2));
16680   ins_cost(INSN_COST);
16681   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16682   ins_encode %{
16683     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16684             as_FloatRegister($src1$$reg),
16685             as_FloatRegister($src2$$reg));
16686   %}
16687   ins_pipe(vlogical64);
16688 %}
16689 
16690 instruct vand16B(vecX dst, vecX src1, vecX src2)
16691 %{
16692   predicate(n->as_Vector()->length_in_bytes() == 16);
16693   match(Set dst (AndV src1 src2));
16694   ins_cost(INSN_COST);
16695   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16696   ins_encode %{
16697     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16698             as_FloatRegister($src1$$reg),
16699             as_FloatRegister($src2$$reg));
16700   %}
16701   ins_pipe(vlogical128);
16702 %}
16703 
16704 // --------------------------------- OR ---------------------------------------
16705 
// Bitwise OR of vectors, 4 or 8 bytes wide; both sizes encoded with the
// 8B arrangement on a 64-bit register (element type irrelevant, hence the
// length_in_bytes predicate).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed: the format string previously read "and" although this instruct
  // emits orr (copy-paste from vand8B); it misrepresented the instruction
  // in disassembly/debug output. Mnemonic now matches the encoding, in
  // line with vor16B below.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16720 
// Bitwise OR of full 128-bit vectors (16B arrangement).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16734 
16735 // --------------------------------- XOR --------------------------------------
16736 
16737 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16738 %{
16739   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16740             n->as_Vector()->length_in_bytes() == 8);
16741   match(Set dst (XorV src1 src2));
16742   ins_cost(INSN_COST);
16743   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16744   ins_encode %{
16745     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16746             as_FloatRegister($src1$$reg),
16747             as_FloatRegister($src2$$reg));
16748   %}
16749   ins_pipe(vlogical64);
16750 %}
16751 
16752 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16753 %{
16754   predicate(n->as_Vector()->length_in_bytes() == 16);
16755   match(Set dst (XorV src1 src2));
16756   ins_cost(INSN_COST);
16757   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16758   ins_encode %{
16759     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16760             as_FloatRegister($src1$$reg),
16761             as_FloatRegister($src2$$reg));
16762   %}
16763   ins_pipe(vlogical128);
16764 %}
16765 
16766 // ------------------------------ Shift ---------------------------------------
16767 instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
16768   predicate(n->as_Vector()->length_in_bytes() == 8);
16769   match(Set dst (LShiftCntV cnt));
16770   match(Set dst (RShiftCntV cnt));
16771   format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
16772   ins_encode %{
16773     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
16774   %}
16775   ins_pipe(vdup_reg_reg64);
16776 %}
16777 
16778 instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
16779   predicate(n->as_Vector()->length_in_bytes() == 16);
16780   match(Set dst (LShiftCntV cnt));
16781   match(Set dst (RShiftCntV cnt));
16782   format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
16783   ins_encode %{
16784     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16785   %}
16786   ins_pipe(vdup_reg_reg128);
16787 %}
16788 
16789 instruct vsll8B(vecD dst, vecD src, vecD shift) %{
16790   predicate(n->as_Vector()->length() == 4 ||
16791             n->as_Vector()->length() == 8);
16792   match(Set dst (LShiftVB src shift));
16793   ins_cost(INSN_COST);
16794   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
16795   ins_encode %{
16796     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
16797             as_FloatRegister($src$$reg),
16798             as_FloatRegister($shift$$reg));
16799   %}
16800   ins_pipe(vshift64);
16801 %}
16802 
16803 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
16804   predicate(n->as_Vector()->length() == 16);
16805   match(Set dst (LShiftVB src shift));
16806   ins_cost(INSN_COST);
16807   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
16808   ins_encode %{
16809     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
16810             as_FloatRegister($src$$reg),
16811             as_FloatRegister($shift$$reg));
16812   %}
16813   ins_pipe(vshift128);
16814 %}
16815 
16816 // Right shifts with vector shift count on aarch64 SIMD are implemented
16817 // as left shift by negative shift count.
16818 // There are two cases for vector shift count.
16819 //
16820 // Case 1: The vector shift count is from replication.
16821 //        |            |
16822 //    LoadVector  RShiftCntV
16823 //        |       /
16824 //     RShiftVI
16825 // Note: In inner loop, multiple neg instructions are used, which can be
16826 // moved to outer loop and merge into one neg instruction.
16827 //
16828 // Case 2: The vector shift count is from loading.
16829 // This case isn't supported by middle-end now. But it's supported by
16830 // panama/vectorIntrinsics(JEP 338: Vector API).
16831 //        |            |
16832 //    LoadVector  LoadVector
16833 //        |       /
16834 //     RShiftVI
16835 //
16836 
// Byte arithmetic right shift by vector count. Per the note above, NEON has
// no right-shift-by-register, so the count is negated into tmp and a left
// shift (sshl) by the negative count is emitted. 8B form also covers
// length 4.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16-lane arithmetic right shift (negate count, then sshl).
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Byte logical right shift by vector count (negate count, then ushl).
// 8B form also covers length 4.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16-lane logical right shift (negate count, then ushl).
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16906 
// Byte shifts by an immediate count. A Java byte shift count can exceed the
// element width; for left/logical-right shifts a count >= 8 yields zero, so
// dst is cleared with eor(dst, src, src); for arithmetic right shifts the
// count is clamped to 7 (sign fill). The 8B forms also cover length 4.

instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= element width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16-lane immediate left shift.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= element width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift; count clamped to 7 (max for bytes).
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 16-lane immediate arithmetic right shift; count clamped to 7.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift; count >= 8 zeroes the result.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16-lane immediate logical right shift; count >= 8 zeroes the result.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17013 
17014 instruct vsll4S(vecD dst, vecD src, vecD shift) %{
17015   predicate(n->as_Vector()->length() == 2 ||
17016             n->as_Vector()->length() == 4);
17017   match(Set dst (LShiftVS src shift));
17018   ins_cost(INSN_COST);
17019   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
17020   ins_encode %{
17021     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
17022             as_FloatRegister($src$$reg),
17023             as_FloatRegister($shift$$reg));
17024   %}
17025   ins_pipe(vshift64);
17026 %}
17027 
17028 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
17029   predicate(n->as_Vector()->length() == 8);
17030   match(Set dst (LShiftVS src shift));
17031   ins_cost(INSN_COST);
17032   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
17033   ins_encode %{
17034     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
17035             as_FloatRegister($src$$reg),
17036             as_FloatRegister($shift$$reg));
17037   %}
17038   ins_pipe(vshift128);
17039 %}
17040 
17041 instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
17042   predicate(n->as_Vector()->length() == 2 ||
17043             n->as_Vector()->length() == 4);
17044   match(Set dst (RShiftVS src shift));
17045   ins_cost(INSN_COST);
17046   effect(TEMP tmp);
17047   format %{ "negr  $tmp,$shift\t"
17048             "sshl  $dst,$src,$tmp\t# vector (4H)" %}
17049   ins_encode %{
17050     __ negr(as_FloatRegister($tmp$$reg), __ T8B,
17051             as_FloatRegister($shift$$reg));
17052     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
17053             as_FloatRegister($src$$reg),
17054             as_FloatRegister($tmp$$reg));
17055   %}
17056   ins_pipe(vshift64);
17057 %}
17058 
17059 instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
17060   predicate(n->as_Vector()->length() == 8);
17061   match(Set dst (RShiftVS src shift));
17062   ins_cost(INSN_COST);
17063   effect(TEMP tmp);
17064   format %{ "negr  $tmp,$shift\t"
17065             "sshl  $dst,$src,$tmp\t# vector (8H)" %}
17066   ins_encode %{
17067     __ negr(as_FloatRegister($tmp$$reg), __ T16B,
17068             as_FloatRegister($shift$$reg));
17069     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
17070             as_FloatRegister($src$$reg),
17071             as_FloatRegister($tmp$$reg));
17072   %}
17073   ins_pipe(vshift128);
17074 %}
17075 
17076 instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
17077   predicate(n->as_Vector()->length() == 2 ||
17078             n->as_Vector()->length() == 4);
17079   match(Set dst (URShiftVS src shift));
17080   ins_cost(INSN_COST);
17081   effect(TEMP tmp);
17082   format %{ "negr  $tmp,$shift\t"
17083             "ushl  $dst,$src,$tmp\t# vector (4H)" %}
17084   ins_encode %{
17085     __ negr(as_FloatRegister($tmp$$reg), __ T8B,
17086             as_FloatRegister($shift$$reg));
17087     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
17088             as_FloatRegister($src$$reg),
17089             as_FloatRegister($tmp$$reg));
17090   %}
17091   ins_pipe(vshift64);
17092 %}
17093 
17094 instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
17095   predicate(n->as_Vector()->length() == 8);
17096   match(Set dst (URShiftVS src shift));
17097   ins_cost(INSN_COST);
17098   effect(TEMP tmp);
17099   format %{ "negr  $tmp,$shift\t"
17100             "ushl  $dst,$src,$tmp\t# vector (8H)" %}
17101   ins_encode %{
17102     __ negr(as_FloatRegister($tmp$$reg), __ T16B,
17103             as_FloatRegister($shift$$reg));
17104     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
17105             as_FloatRegister($src$$reg),
17106             as_FloatRegister($tmp$$reg));
17107   %}
17108   ins_pipe(vshift128);
17109 %}
17110 
17111 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
17112   predicate(n->as_Vector()->length() == 2 ||
17113             n->as_Vector()->length() == 4);
17114   match(Set dst (LShiftVS src shift));
17115   ins_cost(INSN_COST);
17116   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
17117   ins_encode %{
17118     int sh = (int)$shift$$constant;
17119     if (sh >= 16) {
17120       __ eor(as_FloatRegister($dst$$reg), __ T8B,
17121              as_FloatRegister($src$$reg),
17122              as_FloatRegister($src$$reg));
17123     } else {
17124       __ shl(as_FloatRegister($dst$$reg), __ T4H,
17125              as_FloatRegister($src$$reg), sh);
17126     }
17127   %}
17128   ins_pipe(vshift64_imm);
17129 %}
17130 
// Left shift of 8 shorts (8H in a 128-bit vecX) by an immediate count.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros (SHL cannot encode it).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17149 
// Arithmetic right shift of packed shorts in a vecD (2H or 4H lengths)
// by an immediate count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift by >= width is equivalent to shifting by width-1
    // (lanes become all sign bits), so clamp to 15.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17164 
// Arithmetic right shift of 8 shorts (8H in a vecX) by an immediate count.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: arithmetic shift by >= 16 gives the same result as by 15.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17178 
// Logical right shift of packed shorts in a vecD (2H or 4H lengths)
// by an immediate count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical shift by >= lane width zeroes the lane; materialize zero
      // via eor(src, src) since USHR cannot encode such a count.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17198 
// Logical right shift of 8 shorts (8H in a vecX) by an immediate count.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17217 
// Left shift of 2 ints (2S in a 64-bit vecD) by per-lane register counts
// using SSHL (signed shift-left-by-register).
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17230 
// Left shift of 4 ints (4S in a 128-bit vecX) by per-lane register counts.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17243 
// Arithmetic right shift of 2 ints (2S) by register counts: negate the
// counts into tmp, then SSHL (left shift by a negative count shifts right,
// sign-extending).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated shift counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17260 
// Arithmetic right shift of 4 ints (4S) by register counts via negate+SSHL.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated shift counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17277 
// Logical right shift of 2 ints (2S) by register counts via negate+USHL
// (zero-filling).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated shift counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17294 
// Logical right shift of 4 ints (4S) by register counts via negate+USHL.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated shift counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17311 
// Left shift of 2 ints (2S) by an immediate count. No >= 32 guard here:
// for 32-bit lanes the matcher is expected to present an in-range count
// (Java shift counts are masked to 0..31 for ints).
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17324 
// Left shift of 4 ints (4S) by an immediate count.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17337 
// Arithmetic right shift of 2 ints (2S) by an immediate count.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17350 
// Arithmetic right shift of 4 ints (4S) by an immediate count.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17363 
// Logical right shift of 2 ints (2S) by an immediate count.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17376 
// Logical right shift of 4 ints (4S) by an immediate count.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17389 
// Left shift of 2 longs (2D in a 128-bit vecX) by per-lane register counts.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17402 
// Arithmetic right shift of 2 longs (2D) by register counts via negate+SSHL.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated shift counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17419 
// Logical right shift of 2 longs (2D) by register counts via negate+USHL.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated shift counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17436 
// Left shift of 2 longs (2D) by an immediate count.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17449 
// Arithmetic right shift of 2 longs (2D) by an immediate count.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17462 
// Logical right shift of 2 longs (2D) by an immediate count.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17475 
// Element-wise max of 2 floats (2S in a vecD) using NEON FMAX.
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17489 
// Element-wise max of 4 floats (4S in a vecX) using NEON FMAX.
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17503 
// Element-wise max of 2 doubles (2D in a vecX) using NEON FMAX.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17517 
// Element-wise min of 2 floats (2S in a vecD) using NEON FMIN.
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17531 
// Element-wise min of 4 floats (4S in a vecX) using NEON FMIN.
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17545 
// Element-wise min of 2 doubles (2D in a vecX) using NEON FMIN.
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17559 
// Population count of 4 ints (vecX): CNT counts bits per byte, then two
// UADDLP passes pairwise-widen the byte counts (16B -> 8H -> 4S) to yield
// one per-int bit count per lane.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17578 
// Population count of 2 ints (vecD): same CNT + double-UADDLP widening
// scheme as vpopcount4I, on the 64-bit arrangements (8B -> 4H -> 2S).
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17597 
17598 //----------PEEPHOLE RULES-----------------------------------------------------
17599 // These must follow all instruction definitions as they use the names
17600 // defined in the instructions definitions.
17601 //
17602 // peepmatch ( root_instr_name [preceding_instruction]* );
17603 //
17604 // peepconstraint %{
17605 // (instruction_number.operand_name relational_op instruction_number.operand_name
17606 //  [, ...] );
17607 // // instruction numbers are zero-based using left to right order in peepmatch
17608 //
17609 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17610 // // provide an instruction_number.operand_name for each operand that appears
17611 // // in the replacement instruction's match rule
17612 //
17613 // ---------VM FLAGS---------------------------------------------------------
17614 //
17615 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17616 //
17617 // Each peephole rule is given an identifying number starting with zero and
17618 // increasing by one in the order seen by the parser.  An individual peephole
17619 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17620 // on the command-line.
17621 //
17622 // ---------CURRENT LIMITATIONS----------------------------------------------
17623 //
17624 // Only match adjacent instructions in same basic block
17625 // Only equality constraints
17626 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17627 // Only one replacement instruction
17628 //
17629 // ---------EXAMPLE----------------------------------------------------------
17630 //
17631 // // pertinent parts of existing instructions in architecture description
17632 // instruct movI(iRegINoSp dst, iRegI src)
17633 // %{
17634 //   match(Set dst (CopyI src));
17635 // %}
17636 //
17637 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17638 // %{
17639 //   match(Set dst (AddI dst src));
17640 //   effect(KILL cr);
17641 // %}
17642 //
17643 // // Change (inc mov) to lea
17644 // peephole %{
//   // increment preceded by register-register move
17646 //   peepmatch ( incI_iReg movI );
17647 //   // require that the destination register of the increment
17648 //   // match the destination register of the move
17649 //   peepconstraint ( 0.dst == 1.dst );
17650 //   // construct a replacement instruction that sets
17651 //   // the destination to ( move's source register + one )
17652 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17653 // %}
17654 //
17655 
17656 // Implementation no longer uses movX instructions since
17657 // machine-independent system no longer uses CopyX nodes.
17658 //
17659 // peephole
17660 // %{
17661 //   peepmatch (incI_iReg movI);
17662 //   peepconstraint (0.dst == 1.dst);
17663 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17664 // %}
17665 
17666 // peephole
17667 // %{
17668 //   peepmatch (decI_iReg movI);
17669 //   peepconstraint (0.dst == 1.dst);
17670 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17671 // %}
17672 
17673 // peephole
17674 // %{
17675 //   peepmatch (addI_iReg_imm movI);
17676 //   peepconstraint (0.dst == 1.dst);
17677 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17678 // %}
17679 
17680 // peephole
17681 // %{
17682 //   peepmatch (incL_iReg movL);
17683 //   peepconstraint (0.dst == 1.dst);
17684 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17685 // %}
17686 
17687 // peephole
17688 // %{
17689 //   peepmatch (decL_iReg movL);
17690 //   peepconstraint (0.dst == 1.dst);
17691 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17692 // %}
17693 
17694 // peephole
17695 // %{
17696 //   peepmatch (addL_iReg_imm movL);
17697 //   peepconstraint (0.dst == 1.dst);
17698 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17699 // %}
17700 
17701 // peephole
17702 // %{
17703 //   peepmatch (addP_iReg_imm movP);
17704 //   peepconstraint (0.dst == 1.dst);
17705 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17706 // %}
17707 
17708 // // Change load of spilled value to only a spill
17709 // instruct storeI(memory mem, iRegI src)
17710 // %{
17711 //   match(Set mem (StoreI mem src));
17712 // %}
17713 //
17714 // instruct loadI(iRegINoSp dst, memory mem)
17715 // %{
17716 //   match(Set dst (LoadI mem));
17717 // %}
17718 //
17719 
17720 //----------SMARTSPILL RULES---------------------------------------------------
17721 // These must follow all instruction definitions as they use the names
17722 // defined in the instructions definitions.
17723 
17724 // Local Variables:
17725 // mode: c++
17726 // End: