1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
// General Registers

// Each 64-bit general register is described to ADL as two 32-bit
// halves: Rn is the real lower half and Rn_H the virtual upper half
// (see comment above).  r8 and r9 are deliberately not defined here so
// they stay invisible to the allocator and can be used as scratch
// registers (see comment above).  Column 1 is the register save type
// for Java use, column 2 the C convention save type.

reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are callee-save (SOE) under the C convention but
// save-on-call for Java use (see comment above on why no Java
// callee-saves are used).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are system registers: No-Save for Java use (not allocated).
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 128-bit floating-point/SIMD registers. Each can hold
// a vector of up to 4 single-precision (32-bit) or 2 double-precision
// (64-bit) floating-point values. We currently only use the first
// float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each 128-bit vector register is described to ADL as four 32-bit
  // slots: Vn is the lowest word, Vn_H the second, and Vn_J / Vn_K the
  // two virtual upper words (VMReg slots ->next(2) and ->next(3)).
  // All are SOC/SOC in line with the save-on-call policy above.

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad()); // condition flags; encoding 32, not backed by a real VMReg
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// General-register allocation priority (highest first): scratch
// volatiles, then argument registers, then C callee-saves, and last
// the non-allocatable system registers — per the heuristic above.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Vector-register allocation priority (highest first): v16-v31 (no
// save under the C ABI), then argument registers v0-v7, then v8-v15
// (C callee-saves) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The condition flags live in an allocation chunk of their own.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
// (R31/SP is the only general register omitted here.)
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes: each contains exactly one register, so an operand
// using the class is pinned to that specific integer register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including R31/SP), each as a
// low/high 32-bit pair
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers: excludes heapbase
// (R27), thread (R28), fp (R29), lr (R30) and sp (R31)
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// As no_special_reg32_no_fp above, but additionally allows R29 (fp);
// see the reg_class_dynamic selection below.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
// Select between the two classes above based on PreserveFramePointer:
// R29 is not allocatable when the frame pointer must be preserved.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers (low/high pairs):
// excludes heapbase, thread, fp, lr and sp
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// As no_special_reg_no_fp above, but additionally allows R29 (fp);
// see the reg_class_dynamic selection below.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
// Select between the two classes above based on PreserveFramePointer:
// R29 is not allocatable when the frame pointer must be preserved.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// 64-bit singleton classes: each contains exactly one register pair,
// pinning a long/pointer operand to that specific register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (R12 is rmethod)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (includes the special registers
// R27-R31, unlike no_special_ptr_reg below)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers: excludes heapbase,
// thread, fp, lr and sp
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers (uses only the first 32-bit slot Vn of
// each vector register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (Vn plus its virtual high half Vn_H)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers (the two low 32-bit slots of
// each vector register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// n.b. a 128-bit vector register is modelled by the four slices
// Vn, Vn_H, Vn_J, Vn_K.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the slice pair V0, V0_H is listed, unlike
// vectorx_reg which models 128 bits with four slices -- confirm the
// two-slice mask is intended for this operand class.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// n.b. lists only the slice pair V1, V1_H (cf. the four-slice
// encoding used by vectorx_reg) -- confirm intended.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// n.b. lists only the slice pair V2, V2_H (cf. the four-slice
// encoding used by vectorx_reg) -- confirm intended.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// n.b. lists only the slice pair V3, V3_H (cf. the four-slice
// encoding used by vectorx_reg) -- confirm intended.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the sole RFLAGS register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches rank twice as expensive as a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed the same as branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are the most expensive operation costed here.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 emits no call trampolines, so both queries report zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
class HandlerImpl {

 public:

  // Emitters for the exception and deopt handler stubs; the bodies
  // are defined elsewhere in this file.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // n.b. reserves 4 words in total -- presumably one adr plus a far
    // branch of up to 3 instructions; confirm against
    // MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
  // graph traversal helpers

  // locate the membar linked to n (above/below, respectively) via
  // intervening Ctl and Mem ProjNodes; NULL if there is no such link
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  // true if barrier is a MemBarRelease or a MemBarCPUOrder fed by one
  bool leading_membar(const MemBarNode *barrier);

  // true if barrier is a MemBarVolatile belonging to a card mark
  // sequence (its Mem projection feeds a StoreCM)
  bool is_card_mark_membar(const MemBarNode *barrier);
  // presumably tests whether opcode is one of the CompareAndSwapX
  // opcodes -- body not in view; confirm
  bool is_CAS(int opcode);

  // helpers which navigate between the leading, card mark and
  // trailing membars of a volatile put/CAS signature (see the
  // preamble comments in the source block for the graph shapes)
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1073 %}
1074 
1075 source %{
1076 
  // Optimization of volatile gets and puts
1078   // -------------------------------------
1079   //
1080   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1081   // use to implement volatile reads and writes. For a volatile read
1082   // we simply need
1083   //
1084   //   ldar<x>
1085   //
1086   // and for a volatile write we need
1087   //
1088   //   stlr<x>
1089   //
1090   // Alternatively, we can implement them by pairing a normal
1091   // load/store with a memory barrier. For a volatile read we need
1092   //
1093   //   ldr<x>
1094   //   dmb ishld
1095   //
1096   // for a volatile write
1097   //
1098   //   dmb ish
1099   //   str<x>
1100   //   dmb ish
1101   //
1102   // We can also use ldaxr and stlxr to implement compare and swap CAS
1103   // sequences. These are normally translated to an instruction
1104   // sequence like the following
1105   //
1106   //   dmb      ish
1107   // retry:
1108   //   ldxr<x>   rval raddr
1109   //   cmp       rval rold
1110   //   b.ne done
1111   //   stlxr<x>  rval, rnew, rold
1112   //   cbnz      rval retry
1113   // done:
1114   //   cset      r0, eq
1115   //   dmb ishld
1116   //
1117   // Note that the exclusive store is already using an stlxr
1118   // instruction. That is required to ensure visibility to other
1119   // threads of the exclusive write (assuming it succeeds) before that
1120   // of any subsequent writes.
1121   //
1122   // The following instruction sequence is an improvement on the above
1123   //
1124   // retry:
1125   //   ldaxr<x>  rval raddr
1126   //   cmp       rval rold
1127   //   b.ne done
1128   //   stlxr<x>  rval, rnew, rold
1129   //   cbnz      rval retry
1130   // done:
1131   //   cset      r0, eq
1132   //
1133   // We don't need the leading dmb ish since the stlxr guarantees
1134   // visibility of prior writes in the case that the swap is
1135   // successful. Crucially we don't have to worry about the case where
1136   // the swap is not successful since no valid program should be
1137   // relying on visibility of prior changes by the attempting thread
1138   // in the case where the CAS fails.
1139   //
1140   // Similarly, we don't need the trailing dmb ishld if we substitute
1141   // an ldaxr instruction since that will provide all the guarantees we
1142   // require regarding observation of changes made by other threads
1143   // before any change to the CAS address observed by the load.
1144   //
1145   // In order to generate the desired instruction sequence we need to
1146   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1148   // writes or CAS operations and ii) do not occur through any other
1149   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1151   // sequences to the desired machine code sequences. Selection of the
1152   // alternative rules can be implemented by predicates which identify
1153   // the relevant node sequences.
1154   //
1155   // The ideal graph generator translates a volatile read to the node
1156   // sequence
1157   //
1158   //   LoadX[mo_acquire]
1159   //   MemBarAcquire
1160   //
1161   // As a special case when using the compressed oops optimization we
1162   // may also see this variant
1163   //
1164   //   LoadN[mo_acquire]
1165   //   DecodeN
1166   //   MemBarAcquire
1167   //
1168   // A volatile write is translated to the node sequence
1169   //
1170   //   MemBarRelease
1171   //   StoreX[mo_release] {CardMark}-optional
1172   //   MemBarVolatile
1173   //
1174   // n.b. the above node patterns are generated with a strict
1175   // 'signature' configuration of input and output dependencies (see
1176   // the predicates below for exact details). The card mark may be as
1177   // simple as a few extra nodes or, in a few GC configurations, may
1178   // include more complex control flow between the leading and
1179   // trailing memory barriers. However, whatever the card mark
1180   // configuration these signatures are unique to translated volatile
1181   // reads/stores -- they will not appear as a result of any other
1182   // bytecode translation or inlining nor as a consequence of
1183   // optimizing transforms.
1184   //
1185   // We also want to catch inlined unsafe volatile gets and puts and
1186   // be able to implement them using either ldar<x>/stlr<x> or some
1187   // combination of ldr<x>/stlr<x> and dmb instructions.
1188   //
  // Inlined unsafe volatile puts manifest as a minor variant of the
1190   // normal volatile put node sequence containing an extra cpuorder
1191   // membar
1192   //
1193   //   MemBarRelease
1194   //   MemBarCPUOrder
1195   //   StoreX[mo_release] {CardMark}-optional
1196   //   MemBarVolatile
1197   //
1198   // n.b. as an aside, the cpuorder membar is not itself subject to
1199   // matching and translation by adlc rules.  However, the rule
1200   // predicates need to detect its presence in order to correctly
1201   // select the desired adlc rules.
1202   //
1203   // Inlined unsafe volatile gets manifest as a somewhat different
1204   // node sequence to a normal volatile get
1205   //
1206   //   MemBarCPUOrder
1207   //        ||       \\
1208   //   MemBarAcquire LoadX[mo_acquire]
1209   //        ||
1210   //   MemBarCPUOrder
1211   //
1212   // In this case the acquire membar does not directly depend on the
1213   // load. However, we can be sure that the load is generated from an
1214   // inlined unsafe volatile get if we see it dependent on this unique
1215   // sequence of membar nodes. Similarly, given an acquire membar we
1216   // can know that it was added because of an inlined unsafe volatile
1217   // get if it is fed and feeds a cpuorder membar and if its feed
1218   // membar also feeds an acquiring load.
1219   //
1220   // Finally an inlined (Unsafe) CAS operation is translated to the
1221   // following ideal graph
1222   //
1223   //   MemBarRelease
1224   //   MemBarCPUOrder
1225   //   CompareAndSwapX {CardMark}-optional
1226   //   MemBarCPUOrder
1227   //   MemBarAcquire
1228   //
1229   // So, where we can identify these volatile read and write
1230   // signatures we can choose to plant either of the above two code
1231   // sequences. For a volatile read we can simply plant a normal
1232   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1233   // also choose to inhibit translation of the MemBarAcquire and
1234   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1235   //
1236   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1238   // normal str<x> and then a dmb ish for the MemBarVolatile.
1239   // Alternatively, we can inhibit translation of the MemBarRelease
1240   // and MemBarVolatile and instead plant a simple stlr<x>
1241   // instruction.
1242   //
1243   // when we recognise a CAS signature we can choose to plant a dmb
1244   // ish as a translation for the MemBarRelease, the conventional
1245   // macro-instruction sequence for the CompareAndSwap node (which
1246   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1247   // Alternatively, we can elide generation of the dmb instructions
1248   // and plant the alternative CompareAndSwap macro-instruction
1249   // sequence (which uses ldaxr<x>).
1250   //
1251   // Of course, the above only applies when we see these signature
1252   // configurations. We still want to plant dmb instructions in any
1253   // other cases where we may see a MemBarAcquire, MemBarRelease or
1254   // MemBarVolatile. For example, at the end of a constructor which
1255   // writes final/volatile fields we will see a MemBarRelease
1256   // instruction and this needs a 'dmb ish' lest we risk the
1257   // constructed object being visible without making the
1258   // final/volatile field writes visible.
1259   //
1260   // n.b. the translation rules below which rely on detection of the
1261   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1262   // If we see anything other than the signature configurations we
1263   // always just translate the loads and stores to ldr<x> and str<x>
1264   // and translate acquire, release and volatile membars to the
1265   // relevant dmb instructions.
1266   //
1267 
1268   // graph traversal helpers used for volatile put/get and CAS
1269   // optimization
1270 
1271   // 1) general purpose helpers
1272 
1273   // if node n is linked to a parent MemBarNode by an intervening
1274   // Control and Memory ProjNode return the MemBarNode otherwise return
1275   // NULL.
1276   //
1277   // n may only be a Load or a MemBar.
1278 
1279   MemBarNode *parent_membar(const Node *n)
1280   {
1281     Node *ctl = NULL;
1282     Node *mem = NULL;
1283     Node *membar = NULL;
1284 
1285     if (n->is_Load()) {
1286       ctl = n->lookup(LoadNode::Control);
1287       mem = n->lookup(LoadNode::Memory);
1288     } else if (n->is_MemBar()) {
1289       ctl = n->lookup(TypeFunc::Control);
1290       mem = n->lookup(TypeFunc::Memory);
1291     } else {
1292         return NULL;
1293     }
1294 
1295     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1296       return NULL;
1297     }
1298 
1299     membar = ctl->lookup(0);
1300 
1301     if (!membar || !membar->is_MemBar()) {
1302       return NULL;
1303     }
1304 
1305     if (mem->lookup(0) != membar) {
1306       return NULL;
1307     }
1308 
1309     return membar->as_MemBar();
1310   }
1311 
1312   // if n is linked to a child MemBarNode by intervening Control and
1313   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1314 
1315   MemBarNode *child_membar(const MemBarNode *n)
1316   {
1317     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1318     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1319 
1320     // MemBar needs to have both a Ctl and Mem projection
1321     if (! ctl || ! mem)
1322       return NULL;
1323 
1324     MemBarNode *child = NULL;
1325     Node *x;
1326 
1327     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1328       x = ctl->fast_out(i);
1329       // if we see a membar we keep hold of it. we may also see a new
1330       // arena copy of the original but it will appear later
1331       if (x->is_MemBar()) {
1332           child = x->as_MemBar();
1333           break;
1334       }
1335     }
1336 
1337     if (child == NULL) {
1338       return NULL;
1339     }
1340 
1341     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1342       x = mem->fast_out(i);
1343       // if we see a membar we keep hold of it. we may also see a new
1344       // arena copy of the original but it will appear later
1345       if (x == child) {
1346         return child;
1347       }
1348     }
1349     return NULL;
1350   }
1351 
1352   // helper predicate use to filter candidates for a leading memory
1353   // barrier
1354   //
1355   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1356   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1357 
1358   bool leading_membar(const MemBarNode *barrier)
1359   {
1360     int opcode = barrier->Opcode();
1361     // if this is a release membar we are ok
1362     if (opcode == Op_MemBarRelease) {
1363       return true;
1364     }
1365     // if its a cpuorder membar . . .
1366     if (opcode != Op_MemBarCPUOrder) {
1367       return false;
1368     }
1369     // then the parent has to be a release membar
1370     MemBarNode *parent = parent_membar(barrier);
1371     if (!parent) {
1372       return false;
1373     }
1374     opcode = parent->Opcode();
1375     return opcode == Op_MemBarRelease;
1376   }
1377 
1378   // 2) card mark detection helper
1379 
1380   // helper predicate which can be used to detect a volatile membar
1381   // introduced as part of a conditional card mark sequence either by
1382   // G1 or by CMS when UseCondCardMark is true.
1383   //
1384   // membar can be definitively determined to be part of a card mark
1385   // sequence if and only if all the following hold
1386   //
1387   // i) it is a MemBarVolatile
1388   //
1389   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1390   // true
1391   //
1392   // iii) the node's Mem projection feeds a StoreCM node.
1393 
1394   bool is_card_mark_membar(const MemBarNode *barrier)
1395   {
1396     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1397       return false;
1398     }
1399 
1400     if (barrier->Opcode() != Op_MemBarVolatile) {
1401       return false;
1402     }
1403 
1404     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1405 
1406     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1407       Node *y = mem->fast_out(i);
1408       if (y->Opcode() == Op_StoreCM) {
1409         return true;
1410       }
1411     }
1412 
1413     return false;
1414   }
1415 
1416 
1417   // 3) helper predicates to traverse volatile put or CAS graphs which
1418   // may contain GC barrier subgraphs
1419 
1420   // Preamble
1421   // --------
1422   //
1423   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1425   // leading MemBarRelease and a trailing MemBarVolatile as follows
1426   //
1427   //   MemBarRelease
1428   //  {      ||      } -- optional
1429   //  {MemBarCPUOrder}
1430   //         ||     \\
1431   //         ||     StoreX[mo_release]
1432   //         | \     /
1433   //         | MergeMem
1434   //         | /
1435   //   MemBarVolatile
1436   //
1437   // where
1438   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1439   //  | \ and / indicate further routing of the Ctl and Mem feeds
1440   //
1441   // this is the graph we see for non-object stores. however, for a
1442   // volatile Object store (StoreN/P) we may see other nodes below the
1443   // leading membar because of the need for a GC pre- or post-write
1444   // barrier.
1445   //
  // with most GC configurations we will see this simple variant which
1447   // includes a post-write barrier card mark.
1448   //
1449   //   MemBarRelease______________________________
1450   //         ||    \\               Ctl \        \\
1451   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1452   //         | \     /                       . . .  /
1453   //         | MergeMem
1454   //         | /
1455   //         ||      /
1456   //   MemBarVolatile
1457   //
1458   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1459   // the object address to an int used to compute the card offset) and
1460   // Ctl+Mem to a StoreB node (which does the actual card mark).
1461   //
1462   // n.b. a StoreCM node will only appear in this configuration when
1463   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1464   // because it implies a requirement to order visibility of the card
1465   // mark (StoreCM) relative to the object put (StoreP/N) using a
1466   // StoreStore memory barrier (arguably this ought to be represented
1467   // explicitly in the ideal graph but that is not how it works). This
1468   // ordering is required for both non-volatile and volatile
1469   // puts. Normally that means we need to translate a StoreCM using
1470   // the sequence
1471   //
1472   //   dmb ishst
1473   //   stlrb
1474   //
1475   // However, in the case of a volatile put if we can recognise this
1476   // configuration and plant an stlr for the object write then we can
1477   // omit the dmb and just plant an strb since visibility of the stlr
1478   // is ordered before visibility of subsequent stores. StoreCM nodes
1479   // also arise when using G1 or using CMS with conditional card
1480   // marking. In these cases (as we shall see) we don't need to insert
1481   // the dmb when translating StoreCM because there is already an
1482   // intervening StoreLoad barrier between it and the StoreP/N.
1483   //
1484   // It is also possible to perform the card mark conditionally on it
1485   // currently being unmarked in which case the volatile put graph
1486   // will look slightly different
1487   //
1488   //   MemBarRelease____________________________________________
1489   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1490   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1491   //         | \     /                              \            |
1492   //         | MergeMem                            . . .      StoreB
1493   //         | /                                                /
1494   //         ||     /
1495   //   MemBarVolatile
1496   //
1497   // It is worth noting at this stage that both the above
1498   // configurations can be uniquely identified by checking that the
1499   // memory flow includes the following subgraph:
1500   //
1501   //   MemBarRelease
1502   //  {MemBarCPUOrder}
1503   //          |  \      . . .
1504   //          |  StoreX[mo_release]  . . .
1505   //          |   /
1506   //         MergeMem
1507   //          |
1508   //   MemBarVolatile
1509   //
1510   // This is referred to as a *normal* subgraph. It can easily be
1511   // detected starting from any candidate MemBarRelease,
1512   // StoreX[mo_release] or MemBarVolatile.
1513   //
1514   // A simple variation on this normal case occurs for an unsafe CAS
1515   // operation. The basic graph for a non-object CAS is
1516   //
1517   //   MemBarRelease
1518   //         ||
1519   //   MemBarCPUOrder
1520   //         ||     \\   . . .
1521   //         ||     CompareAndSwapX
1522   //         ||       |
1523   //         ||     SCMemProj
1524   //         | \     /
1525   //         | MergeMem
1526   //         | /
1527   //   MemBarCPUOrder
1528   //         ||
1529   //   MemBarAcquire
1530   //
1531   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1534   // tail of the graph is a pair comprising a MemBarCPUOrder +
1535   // MemBarAcquire.
1536   //
1537   // So, in the case of a CAS the normal graph has the variant form
1538   //
1539   //   MemBarRelease
1540   //   MemBarCPUOrder
1541   //          |   \      . . .
1542   //          |  CompareAndSwapX  . . .
1543   //          |    |
1544   //          |   SCMemProj
1545   //          |   /  . . .
1546   //         MergeMem
1547   //          |
1548   //   MemBarCPUOrder
1549   //   MemBarAcquire
1550   //
1551   // This graph can also easily be detected starting from any
1552   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1553   //
1554   // the code below uses two helper predicates, leading_to_normal and
1555   // normal_to_leading to identify these normal graphs, one validating
1556   // the layout starting from the top membar and searching down and
1557   // the other validating the layout starting from the lower membar
1558   // and searching up.
1559   //
1560   // There are two special case GC configurations when a normal graph
1561   // may not be generated: when using G1 (which always employs a
1562   // conditional card mark); and when using CMS with conditional card
1563   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
1565   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1567   // object put and the corresponding conditional card mark. CMS
1568   // employs a post-write GC barrier while G1 employs both a pre- and
1569   // post-write GC barrier. Of course the extra nodes may be absent --
1570   // they are only inserted for object puts. This significantly
1571   // complicates the task of identifying whether a MemBarRelease,
1572   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1573   // when using these GC configurations (see below). It adds similar
1574   // complexity to the task of identifying whether a MemBarRelease,
1575   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1576   //
1577   // In both cases the post-write subtree includes an auxiliary
1578   // MemBarVolatile (StoreLoad barrier) separating the object put and
1579   // the read of the corresponding card. This poses two additional
1580   // problems.
1581   //
1582   // Firstly, a card mark MemBarVolatile needs to be distinguished
1583   // from a normal trailing MemBarVolatile. Resolving this first
1584   // problem is straightforward: a card mark MemBarVolatile always
1585   // projects a Mem feed to a StoreCM node and that is a unique marker
1586   //
1587   //      MemBarVolatile (card mark)
1588   //       C |    \     . . .
1589   //         |   StoreCM   . . .
1590   //       . . .
1591   //
1592   // The second problem is how the code generator is to translate the
1593   // card mark barrier? It always needs to be translated to a "dmb
1594   // ish" instruction whether or not it occurs as part of a volatile
1595   // put. A StoreLoad barrier is needed after the object put to ensure
1596   // i) visibility to GC threads of the object put and ii) visibility
1597   // to the mutator thread of any card clearing write by a GC
1598   // thread. Clearly a normal store (str) will not guarantee this
1599   // ordering but neither will a releasing store (stlr). The latter
1600   // guarantees that the object put is visible but does not guarantee
1601   // that writes by other threads have also been observed.
1602   //
1603   // So, returning to the task of translating the object put and the
1604   // leading/trailing membar nodes: what do the non-normal node graph
1605   // look like for these 2 special cases? and how can we determine the
1606   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1607   // in both normal and non-normal cases?
1608   //
1609   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1611   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1612   // intervening StoreLoad barrier (MemBarVolatile).
1613   //
1614   // So, with CMS we may see a node graph for a volatile object store
1615   // which looks like this
1616   //
1617   //   MemBarRelease
1618   //   MemBarCPUOrder_(leading)__________________
1619   //     C |    M \       \\                   C \
1620   //       |       \    StoreN/P[mo_release]  CastP2X
1621   //       |    Bot \    /
1622   //       |       MergeMem
1623   //       |         /
1624   //      MemBarVolatile (card mark)
1625   //     C |  ||    M |
1626   //       | LoadB    |
1627   //       |   |      |
1628   //       | Cmp      |\
1629   //       | /        | \
1630   //       If         |  \
1631   //       | \        |   \
1632   // IfFalse  IfTrue  |    \
1633   //       \     / \  |     \
1634   //        \   / StoreCM    |
1635   //         \ /      |      |
1636   //        Region   . . .   |
1637   //          | \           /
1638   //          |  . . .  \  / Bot
1639   //          |       MergeMem
1640   //          |          |
1641   //        MemBarVolatile (trailing)
1642   //
1643   // The first MergeMem merges the AliasIdxBot Mem slice from the
1644   // leading membar and the oopptr Mem slice from the Store into the
1645   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1646   // Mem slice from the card mark membar and the AliasIdxRaw slice
1647   // from the StoreCM into the trailing membar (n.b. the latter
1648   // proceeds via a Phi associated with the If region).
1649   //
1650   // The graph for a CAS varies slightly, the obvious difference being
1651   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1652   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1653   // MemBarAcquire pair. The other important difference is that the
1654   // CompareAndSwap node's SCMemProj is not merged into the card mark
1655   // membar - it still feeds the trailing MergeMem. This also means
1656   // that the card mark membar receives its Mem feed directly from the
1657   // leading membar rather than via a MergeMem.
1658   //
1659   //   MemBarRelease
1660   //   MemBarCPUOrder__(leading)_________________________
1661   //       ||                       \\                 C \
1662   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1663   //     C |  ||    M |              |
1664   //       | LoadB    |       ______/|
1665   //       |   |      |      /       |
1666   //       | Cmp      |     /      SCMemProj
1667   //       | /        |    /         |
1668   //       If         |   /         /
1669   //       | \        |  /         /
1670   // IfFalse  IfTrue  | /         /
1671   //       \     / \  |/ prec    /
1672   //        \   / StoreCM       /
1673   //         \ /      |        /
1674   //        Region   . . .    /
1675   //          | \            /
1676   //          |  . . .  \   / Bot
1677   //          |       MergeMem
1678   //          |          |
1679   //        MemBarCPUOrder
1680   //        MemBarAcquire (trailing)
1681   //
1682   // This has a slightly different memory subgraph to the one seen
1683   // previously but the core of it is the same as for the CAS normal
  // subgraph
1685   //
1686   //   MemBarRelease
1687   //   MemBarCPUOrder____
1688   //      ||             \      . . .
1689   //   MemBarVolatile  CompareAndSwapX  . . .
1690   //      |  \            |
1691   //        . . .   SCMemProj
1692   //          |     /  . . .
1693   //         MergeMem
1694   //          |
1695   //   MemBarCPUOrder
1696   //   MemBarAcquire
1697   //
1698   //
1699   // G1 is quite a lot more complicated. The nodes inserted on behalf
1700   // of G1 may comprise: a pre-write graph which adds the old value to
1701   // the SATB queue; the releasing store itself; and, finally, a
1702   // post-write graph which performs a card mark.
1703   //
1704   // The pre-write graph may be omitted, but only when the put is
1705   // writing to a newly allocated (young gen) object and then only if
1706   // there is a direct memory chain to the Initialize node for the
1707   // object allocation. This will not happen for a volatile put since
1708   // any memory chain passes through the leading membar.
1709   //
1710   // The pre-write graph includes a series of 3 If tests. The outermost
1711   // If tests whether SATB is enabled (no else case). The next If tests
1712   // whether the old value is non-NULL (no else case). The third tests
1713   // whether the SATB queue index is > 0, if so updating the queue. The
1714   // else case for this third If calls out to the runtime to allocate a
1715   // new queue buffer.
1716   //
1717   // So with G1 the pre-write and releasing store subgraph looks like
1718   // this (the nested Ifs are omitted).
1719   //
1720   //  MemBarRelease (leading)____________
1721   //     C |  ||  M \   M \    M \  M \ . . .
1722   //       | LoadB   \  LoadL  LoadN   \
1723   //       | /        \                 \
1724   //       If         |\                 \
1725   //       | \        | \                 \
1726   //  IfFalse  IfTrue |  \                 \
1727   //       |     |    |   \                 |
1728   //       |     If   |   /\                |
1729   //       |     |          \               |
1730   //       |                 \              |
1731   //       |    . . .         \             |
1732   //       | /       | /       |            |
1733   //      Region  Phi[M]       |            |
1734   //       | \       |         |            |
1735   //       |  \_____ | ___     |            |
1736   //     C | C \     |   C \ M |            |
1737   //       | CastP2X | StoreN/P[mo_release] |
1738   //       |         |         |            |
1739   //     C |       M |       M |          M |
1740   //        \        |         |           /
1741   //                  . . .
1742   //          (post write subtree elided)
1743   //                    . . .
1744   //             C \         M /
1745   //         MemBarVolatile (trailing)
1746   //
1747   // n.b. the LoadB in this subgraph is not the card read -- it's a
1748   // read of the SATB queue active flag.
1749   //
  // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
  // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1753   //
1754   // The G1 post-write subtree is also optional, this time when the
1755   // new value being written is either null or can be identified as a
1756   // newly allocated (young gen) object with no intervening control
  // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
  // trailing membar as per the normal subgraph. So, the only special
1761   // case which arises is when the post-write subgraph is generated.
1762   //
1763   // The kernel of the post-write G1 subgraph is the card mark itself
1764   // which includes a card mark memory barrier (MemBarVolatile), a
1765   // card test (LoadB), and a conditional update (If feeding a
1766   // StoreCM). These nodes are surrounded by a series of nested Ifs
1767   // which try to avoid doing the card mark. The top level If skips if
1768   // the object reference does not cross regions (i.e. it tests if
1769   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1770   // need not be recorded. The next If, which skips on a NULL value,
1771   // may be absent (it is not generated if the type of value is >=
1772   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1773   // checking if card_val != young).  n.b. although this test requires
1774   // a pre-read of the card it can safely be done before the StoreLoad
1775   // barrier. However that does not bypass the need to reread the card
1776   // after the barrier.
1777   //
1778   //                (pre-write subtree elided)
1779   //        . . .                  . . .    . . .  . . .
1780   //        C |                    M |     M |    M |
1781   //       Region                  Phi[M] StoreN    |
1782   //          |                     / \      |      |
1783   //         / \_______            /   \     |      |
1784   //      C / C \      . . .            \    |      |
1785   //       If   CastP2X . . .            |   |      |
1786   //       / \                           |   |      |
1787   //      /   \                          |   |      |
1788   // IfFalse IfTrue                      |   |      |
1789   //   |       |                         |   |     /|
1790   //   |       If                        |   |    / |
1791   //   |      / \                        |   |   /  |
1792   //   |     /   \                        \  |  /   |
1793   //   | IfFalse IfTrue                   MergeMem  |
1794   //   |  . . .    / \                       /      |
1795   //   |          /   \                     /       |
1796   //   |     IfFalse IfTrue                /        |
1797   //   |      . . .    |                  /         |
1798   //   |               If                /          |
1799   //   |               / \              /           |
1800   //   |              /   \            /            |
1801   //   |         IfFalse IfTrue       /             |
1802   //   |           . . .   |         /              |
1803   //   |                    \       /               |
1804   //   |                     \     /                |
1805   //   |             MemBarVolatile__(card mark)    |
1806   //   |                ||   C |  M \  M \          |
1807   //   |               LoadB   If    |    |         |
1808   //   |                      / \    |    |         |
1809   //   |                     . . .   |    |         |
1810   //   |                          \  |    |        /
1811   //   |                        StoreCM   |       /
1812   //   |                          . . .   |      /
1813   //   |                        _________/      /
1814   //   |                       /  _____________/
1815   //   |   . . .       . . .  |  /            /
1816   //   |    |                 | /   _________/
1817   //   |    |               Phi[M] /        /
1818   //   |    |                 |   /        /
1819   //   |    |                 |  /        /
1820   //   |  Region  . . .     Phi[M]  _____/
1821   //   |    /                 |    /
1822   //   |                      |   /
1823   //   | . . .   . . .        |  /
1824   //   | /                    | /
1825   // Region           |  |  Phi[M]
1826   //   |              |  |  / Bot
1827   //    \            MergeMem
1828   //     \            /
1829   //     MemBarVolatile
1830   //
1831   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1832   // from the leading membar and the oopptr Mem slice from the Store
1833   // into the card mark membar i.e. the memory flow to the card mark
1834   // membar still looks like a normal graph.
1835   //
1836   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1837   // Mem slices (from the StoreCM and other card mark queue stores).
1838   // However in this case the AliasIdxBot Mem slice does not come
1839   // direct from the card mark membar. It is merged through a series
1840   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1841   // from the leading membar with the Mem feed from the card mark
1842   // membar. Each Phi corresponds to one of the Ifs which may skip
1843   // around the card mark membar. So when the If implementing the NULL
1844   // value check has been elided the total number of Phis is 2
1845   // otherwise it is 3.
1846   //
  // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
  // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1852   // Mem feed from the CompareAndSwapP/N includes a precedence
1853   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1854   // trailing membar. So, as before the configuration includes the
1855   // normal CAS graph as a subgraph of the memory flow.
1856   //
1857   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1859   // its child membar, either a volatile put graph (including a
1860   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1861   // When that child is not a card mark membar then it marks the end
1862   // of the volatile put or CAS subgraph. If the child is a card mark
1863   // membar then the normal subgraph will form part of a volatile put
1864   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1865   // to a trailing barrier via a MergeMem. That feed is either direct
1866   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1867   // memory flow (for G1).
1868   //
1869   // The predicates controlling generation of instructions for store
1870   // and barrier nodes employ a few simple helper functions (described
1871   // below) which identify the presence or absence of all these
1872   // subgraph configurations and provide a means of traversing from
1873   // one node in the subgraph to another.
1874 
1875   // is_CAS(int opcode)
1876   //
1877   // return true if opcode is one of the possible CompareAndSwapX
1878   // values otherwise false.
1879 
1880   bool is_CAS(int opcode)
1881   {
1882     switch(opcode) {
1883       // We handle these
1884     case Op_CompareAndSwapI:
1885     case Op_CompareAndSwapL:
1886     case Op_CompareAndSwapP:
1887     case Op_CompareAndSwapN:
1888  // case Op_CompareAndSwapB:
1889  // case Op_CompareAndSwapS:
1890       return true;
1891       // These are TBD
1892     case Op_WeakCompareAndSwapB:
1893     case Op_WeakCompareAndSwapS:
1894     case Op_WeakCompareAndSwapI:
1895     case Op_WeakCompareAndSwapL:
1896     case Op_WeakCompareAndSwapP:
1897     case Op_WeakCompareAndSwapN:
1898     case Op_CompareAndExchangeB:
1899     case Op_CompareAndExchangeS:
1900     case Op_CompareAndExchangeI:
1901     case Op_CompareAndExchangeL:
1902     case Op_CompareAndExchangeP:
1903     case Op_CompareAndExchangeN:
1904       return false;
1905     default:
1906       return false;
1907     }
1908   }
1909 
1910 
1911   // leading_to_normal
1912   //
  // graph traversal helper which detects the normal case Mem feed from
1914   // a release membar (or, optionally, its cpuorder child) to a
1915   // dependent volatile membar i.e. it ensures that one or other of
1916   // the following Mem flow subgraph is present.
1917   //
1918   //   MemBarRelease
1919   //   MemBarCPUOrder {leading}
1920   //          |  \      . . .
1921   //          |  StoreN/P[mo_release]  . . .
1922   //          |   /
1923   //         MergeMem
1924   //          |
1925   //   MemBarVolatile {trailing or card mark}
1926   //
1927   //   MemBarRelease
1928   //   MemBarCPUOrder {leading}
1929   //      |       \      . . .
1930   //      |     CompareAndSwapX  . . .
1931   //               |
1932   //     . . .    SCMemProj
1933   //           \   |
1934   //      |    MergeMem
1935   //      |       /
1936   //    MemBarCPUOrder
1937   //    MemBarAcquire {trailing}
1938   //
1939   // if the correct configuration is present returns the trailing
1940   // membar otherwise NULL.
1941   //
1942   // the input membar is expected to be either a cpuorder membar or a
1943   // release membar. in the latter case it should not have a cpu membar
1944   // child.
1945   //
1946   // the returned value may be a card mark or trailing membar
1947   //
1948 
1949   MemBarNode *leading_to_normal(MemBarNode *leading)
1950   {
1951     assert((leading->Opcode() == Op_MemBarRelease ||
1952             leading->Opcode() == Op_MemBarCPUOrder),
1953            "expecting a volatile or cpuroder membar!");
1954 
1955     // check the mem flow
1956     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1957 
1958     if (!mem) {
1959       return NULL;
1960     }
1961 
1962     Node *x = NULL;
1963     StoreNode * st = NULL;
1964     LoadStoreNode *cas = NULL;
1965     MergeMemNode *mm = NULL;
1966 
1967     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1968       x = mem->fast_out(i);
1969       if (x->is_MergeMem()) {
1970         if (mm != NULL) {
1971           return NULL;
1972         }
1973         // two merge mems is one too many
1974         mm = x->as_MergeMem();
1975       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1976         // two releasing stores/CAS nodes is one too many
1977         if (st != NULL || cas != NULL) {
1978           return NULL;
1979         }
1980         st = x->as_Store();
1981       } else if (is_CAS(x->Opcode())) {
1982         if (st != NULL || cas != NULL) {
1983           return NULL;
1984         }
1985         cas = x->as_LoadStore();
1986       }
1987     }
1988 
1989     // must have a store or a cas
1990     if (!st && !cas) {
1991       return NULL;
1992     }
1993 
1994     // must have a merge if we also have st
1995     if (st && !mm) {
1996       return NULL;
1997     }
1998 
1999     Node *y = NULL;
2000     if (cas) {
2001       // look for an SCMemProj
2002       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2003         x = cas->fast_out(i);
2004         if (x->is_Proj()) {
2005           y = x;
2006           break;
2007         }
2008       }
2009       if (y == NULL) {
2010         return NULL;
2011       }
2012       // the proj must feed a MergeMem
2013       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
2014         x = y->fast_out(i);
2015         if (x->is_MergeMem()) {
2016           mm = x->as_MergeMem();
2017           break;
2018         }
2019       }
2020       if (mm == NULL)
2021         return NULL;
2022     } else {
2023       // ensure the store feeds the existing mergemem;
2024       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2025         if (st->fast_out(i) == mm) {
2026           y = st;
2027           break;
2028         }
2029       }
2030       if (y == NULL) {
2031         return NULL;
2032       }
2033     }
2034 
2035     MemBarNode *mbar = NULL;
2036     // ensure the merge feeds to the expected type of membar
2037     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2038       x = mm->fast_out(i);
2039       if (x->is_MemBar()) {
2040         int opcode = x->Opcode();
2041         if (opcode == Op_MemBarVolatile && st) {
2042           mbar = x->as_MemBar();
2043         } else if (cas && opcode == Op_MemBarCPUOrder) {
2044           MemBarNode *y =  x->as_MemBar();
2045           y = child_membar(y);
2046           if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
2047             mbar = y;
2048           }
2049         }
2050         break;
2051       }
2052     }
2053 
2054     return mbar;
2055   }
2056 
2057   // normal_to_leading
2058   //
2059   // graph traversal helper which detects the normal case Mem feed
2060   // from either a card mark or a trailing membar to a preceding
2061   // release membar (optionally its cpuorder child) i.e. it ensures
2062   // that one or other of the following Mem flow subgraphs is present.
2063   //
2064   //   MemBarRelease
2065   //   MemBarCPUOrder {leading}
2066   //          |  \      . . .
2067   //          |  StoreN/P[mo_release]  . . .
2068   //          |   /
2069   //         MergeMem
2070   //          |
2071   //   MemBarVolatile {card mark or trailing}
2072   //
2073   //   MemBarRelease
2074   //   MemBarCPUOrder {leading}
2075   //      |       \      . . .
2076   //      |     CompareAndSwapX  . . .
2077   //               |
2078   //     . . .    SCMemProj
2079   //           \   |
2080   //      |    MergeMem
2081   //      |        /
2082   //    MemBarCPUOrder
2083   //    MemBarAcquire {trailing}
2084   //
2085   // this predicate checks for the same flow as the previous predicate
2086   // but starting from the bottom rather than the top.
2087   //
  // if the configuration is present returns the cpuorder membar for
2089   // preference or when absent the release membar otherwise NULL.
2090   //
2091   // n.b. the input membar is expected to be a MemBarVolatile but
2092   // need not be a card mark membar.
2093 
2094   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2095   {
2096     // input must be a volatile membar
2097     assert((barrier->Opcode() == Op_MemBarVolatile ||
2098             barrier->Opcode() == Op_MemBarAcquire),
2099            "expecting a volatile or an acquire membar");
2100     Node *x;
2101     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2102 
2103     // if we have an acquire membar then it must be fed via a CPUOrder
2104     // membar
2105 
2106     if (is_cas) {
2107       // skip to parent barrier which must be a cpuorder
2108       x = parent_membar(barrier);
2109       if (x->Opcode() != Op_MemBarCPUOrder)
2110         return NULL;
2111     } else {
2112       // start from the supplied barrier
2113       x = (Node *)barrier;
2114     }
2115 
2116     // the Mem feed to the membar should be a merge
2117     x = x ->in(TypeFunc::Memory);
2118     if (!x->is_MergeMem())
2119       return NULL;
2120 
2121     MergeMemNode *mm = x->as_MergeMem();
2122 
2123     if (is_cas) {
2124       // the merge should be fed from the CAS via an SCMemProj node
2125       x = NULL;
2126       for (uint idx = 1; idx < mm->req(); idx++) {
2127         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2128           x = mm->in(idx);
2129           break;
2130         }
2131       }
2132       if (x == NULL) {
2133         return NULL;
2134       }
2135       // check for a CAS feeding this proj
2136       x = x->in(0);
2137       int opcode = x->Opcode();
2138       if (!is_CAS(opcode)) {
2139         return NULL;
2140       }
2141       // the CAS should get its mem feed from the leading membar
2142       x = x->in(MemNode::Memory);
2143     } else {
2144       // the merge should get its Bottom mem feed from the leading membar
2145       x = mm->in(Compile::AliasIdxBot);
2146     }
2147 
2148     // ensure this is a non control projection
2149     if (!x->is_Proj() || x->is_CFG()) {
2150       return NULL;
2151     }
2152     // if it is fed by a membar that's the one we want
2153     x = x->in(0);
2154 
2155     if (!x->is_MemBar()) {
2156       return NULL;
2157     }
2158 
2159     MemBarNode *leading = x->as_MemBar();
2160     // reject invalid candidates
2161     if (!leading_membar(leading)) {
2162       return NULL;
2163     }
2164 
2165     // ok, we have a leading membar, now for the sanity clauses
2166 
2167     // the leading membar must feed Mem to a releasing store or CAS
2168     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2169     StoreNode *st = NULL;
2170     LoadStoreNode *cas = NULL;
2171     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2172       x = mem->fast_out(i);
2173       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2174         // two stores or CASes is one too many
2175         if (st != NULL || cas != NULL) {
2176           return NULL;
2177         }
2178         st = x->as_Store();
2179       } else if (is_CAS(x->Opcode())) {
2180         if (st != NULL || cas != NULL) {
2181           return NULL;
2182         }
2183         cas = x->as_LoadStore();
2184       }
2185     }
2186 
2187     // we should not have both a store and a cas
2188     if (st == NULL & cas == NULL) {
2189       return NULL;
2190     }
2191 
2192     if (st == NULL) {
2193       // nothing more to check
2194       return leading;
2195     } else {
2196       // we should not have a store if we started from an acquire
2197       if (is_cas) {
2198         return NULL;
2199       }
2200 
2201       // the store should feed the merge we used to get here
2202       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2203         if (st->fast_out(i) == mm) {
2204           return leading;
2205         }
2206       }
2207     }
2208 
2209     return NULL;
2210   }
2211 
2212   // card_mark_to_trailing
2213   //
2214   // graph traversal helper which detects extra, non-normal Mem feed
2215   // from a card mark volatile membar to a trailing membar i.e. it
2216   // ensures that one of the following three GC post-write Mem flow
2217   // subgraphs is present.
2218   //
2219   // 1)
2220   //     . . .
2221   //       |
2222   //   MemBarVolatile (card mark)
2223   //      |          |
2224   //      |        StoreCM
2225   //      |          |
2226   //      |        . . .
2227   //  Bot |  /
2228   //   MergeMem
2229   //      |
2230   //      |
2231   //    MemBarVolatile {trailing}
2232   //
2233   // 2)
2234   //   MemBarRelease/CPUOrder (leading)
2235   //    |
2236   //    |
2237   //    |\       . . .
2238   //    | \        |
2239   //    |  \  MemBarVolatile (card mark)
2240   //    |   \   |     |
2241   //     \   \  |   StoreCM    . . .
2242   //      \   \ |
2243   //       \  Phi
2244   //        \ /
2245   //        Phi  . . .
2246   //     Bot |   /
2247   //       MergeMem
2248   //         |
2249   //    MemBarVolatile {trailing}
2250   //
2251   //
2252   // 3)
2253   //   MemBarRelease/CPUOrder (leading)
2254   //    |
2255   //    |\
2256   //    | \
2257   //    |  \      . . .
2258   //    |   \       |
2259   //    |\   \  MemBarVolatile (card mark)
2260   //    | \   \   |     |
2261   //    |  \   \  |   StoreCM    . . .
2262   //    |   \   \ |
2263   //     \   \  Phi
2264   //      \   \ /
2265   //       \  Phi
2266   //        \ /
2267   //        Phi  . . .
2268   //     Bot |   /
2269   //       MergeMem
2270   //         |
2271   //         |
2272   //    MemBarVolatile {trailing}
2273   //
2274   // configuration 1 is only valid if UseConcMarkSweepGC &&
2275   // UseCondCardMark
2276   //
2277   // configurations 2 and 3 are only valid if UseG1GC.
2278   //
2279   // if a valid configuration is present returns the trailing membar
2280   // otherwise NULL.
2281   //
2282   // n.b. the supplied membar is expected to be a card mark
2283   // MemBarVolatile i.e. the caller must ensure the input node has the
2284   // correct operand and feeds Mem to a StoreCM node
2285 
2286   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2287   {
2288     // input must be a card mark volatile membar
2289     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2290 
2291     Node *feed = barrier->proj_out(TypeFunc::Memory);
2292     Node *x;
2293     MergeMemNode *mm = NULL;
2294 
2295     const int MAX_PHIS = 3;     // max phis we will search through
2296     int phicount = 0;           // current search count
2297 
2298     bool retry_feed = true;
2299     while (retry_feed) {
2300       // see if we have a direct MergeMem feed
2301       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2302         x = feed->fast_out(i);
2303         // the correct Phi will be merging a Bot memory slice
2304         if (x->is_MergeMem()) {
2305           mm = x->as_MergeMem();
2306           break;
2307         }
2308       }
2309       if (mm) {
2310         retry_feed = false;
2311       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2312         // the barrier may feed indirectly via one or two Phi nodes
2313         PhiNode *phi = NULL;
2314         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2315           x = feed->fast_out(i);
2316           // the correct Phi will be merging a Bot memory slice
2317           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2318             phi = x->as_Phi();
2319             break;
2320           }
2321         }
2322         if (!phi) {
2323           return NULL;
2324         }
2325         // look for another merge below this phi
2326         feed = phi;
2327       } else {
2328         // couldn't find a merge
2329         return NULL;
2330       }
2331     }
2332 
2333     // sanity check this feed turns up as the expected slice
2334     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2335 
2336     MemBarNode *trailing = NULL;
2337     // be sure we have a trailing membar the merge
2338     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2339       x = mm->fast_out(i);
2340       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2341         trailing = x->as_MemBar();
2342         break;
2343       }
2344     }
2345 
2346     return trailing;
2347   }
2348 
2349   // trailing_to_card_mark
2350   //
2351   // graph traversal helper which detects extra, non-normal Mem feed
2352   // from a trailing volatile membar to a preceding card mark volatile
2353   // membar i.e. it identifies whether one of the three possible extra
2354   // GC post-write Mem flow subgraphs is present
2355   //
2356   // this predicate checks for the same flow as the previous predicate
2357   // but starting from the bottom rather than the top.
2358   //
2359   // if the configuration is present returns the card mark membar
2360   // otherwise NULL
2361   //
2362   // n.b. the supplied membar is expected to be a trailing
2363   // MemBarVolatile i.e. the caller must ensure the input node has the
2364   // correct opcode
2365 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bot slice is already a Proj there is no Phi chain to walk
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // scan this Phi's inputs looking for a Mem Proj from a
        // volatile (card mark) membar, a further Phi to chain
        // through, and/or a feed from a leading membar
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // either not G1, not a Phi, or too many Phis -- give up
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    // the membar we reached must really be a card mark membar
    // i.e. one which feeds Mem to a StoreCM
    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2442 
2443   // trailing_to_leading
2444   //
2445   // graph traversal helper which checks the Mem flow up the graph
2446   // from a (non-card mark) trailing membar attempting to locate and
2447   // return an associated leading membar. it first looks for a
2448   // subgraph in the normal configuration (relying on helper
2449   // normal_to_leading). failing that it then looks for one of the
2450   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2452   // trailing_to_card_mark), and then checks that the card mark membar
2453   // is fed by a leading membar (once again relying on auxiliary
2454   // predicate normal_to_leading).
2455   //
  // if the configuration is valid returns the cpuorder membar for
2457   // preference or when absent the release membar otherwise NULL.
2458   //
2459   // n.b. the input membar is expected to be either a volatile or
2460   // acquire membar but in the former case must *not* be a card mark
2461   // membar.
2462 
2463   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2464   {
2465     assert((trailing->Opcode() == Op_MemBarAcquire ||
2466             trailing->Opcode() == Op_MemBarVolatile),
2467            "expecting an acquire or volatile membar");
2468     assert((trailing->Opcode() != Op_MemBarVolatile ||
2469             !is_card_mark_membar(trailing)),
2470            "not expecting a card mark membar");
2471 
2472     MemBarNode *leading = normal_to_leading(trailing);
2473 
2474     if (leading) {
2475       return leading;
2476     }
2477 
2478     // nothing more to do if this is an acquire
2479     if (trailing->Opcode() == Op_MemBarAcquire) {
2480       return NULL;
2481     }
2482 
2483     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2484 
2485     if (!card_mark_membar) {
2486       return NULL;
2487     }
2488 
2489     return normal_to_leading(card_mark_membar);
2490   }
2491 
2492   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2493 
// predicate controlling whether the acquire membar passed in can be
// elided: returning true means the associated acquiring load will be
// emitted as ldar<x>, so no dmb needs planting for this membar.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    // the load must also hang off the Mem projection; resetting ld to
    // NULL when we find it there records that both feeds were seen
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2601 
2602 bool needs_acquiring_load(const Node *n)
2603 {
2604   assert(n->is_Load(), "expecting a load");
2605   if (UseBarriersForVolatile) {
2606     // we use a normal load and a dmb
2607     return false;
2608   }
2609 
2610   LoadNode *ld = n->as_Load();
2611 
2612   if (!ld->is_acquire()) {
2613     return false;
2614   }
2615 
2616   // check if this load is feeding an acquire membar
2617   //
2618   //   LoadX[mo_acquire]
2619   //   {  |1   }
2620   //   {DecodeN}
2621   //      |Parms
2622   //   MemBarAcquire*
2623   //
2624   // where * tags node we were passed
2625   // and |k means input k
2626 
2627   Node *start = ld;
2628   Node *mbacq = NULL;
2629 
2630   // if we hit a DecodeNarrowPtr we reset the start node and restart
2631   // the search through the outputs
2632  restart:
2633 
2634   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2635     Node *x = start->fast_out(i);
2636     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2637       mbacq = x;
2638     } else if (!mbacq &&
2639                (x->is_DecodeNarrowPtr() ||
2640                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2641       start = x;
2642       goto restart;
2643     }
2644   }
2645 
2646   if (mbacq) {
2647     return true;
2648   }
2649 
2650   // now check for an unsafe volatile get
2651 
2652   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2653   //
2654   //     MemBarCPUOrder
2655   //        ||       \\
2656   //   MemBarAcquire* LoadX[mo_acquire]
2657   //        ||
2658   //   MemBarCPUOrder
2659 
2660   MemBarNode *membar;
2661 
2662   membar = parent_membar(ld);
2663 
2664   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2665     return false;
2666   }
2667 
2668   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2669 
2670   membar = child_membar(membar);
2671 
2672   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2673     return false;
2674   }
2675 
2676   membar = child_membar(membar);
2677 
2678   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2679     return false;
2680   }
2681 
2682   return true;
2683 }
2684 
2685 bool unnecessary_release(const Node *n)
2686 {
2687   assert((n->is_MemBar() &&
2688           n->Opcode() == Op_MemBarRelease),
2689          "expecting a release membar");
2690 
2691   if (UseBarriersForVolatile) {
2692     // we need to plant a dmb
2693     return false;
2694   }
2695 
2696   // if there is a dependent CPUOrder barrier then use that as the
2697   // leading
2698 
2699   MemBarNode *barrier = n->as_MemBar();
2700   // check for an intervening cpuorder membar
2701   MemBarNode *b = child_membar(barrier);
2702   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2703     // ok, so start the check from the dependent cpuorder barrier
2704     barrier = b;
2705   }
2706 
2707   // must start with a normal feed
2708   MemBarNode *child_barrier = leading_to_normal(barrier);
2709 
2710   if (!child_barrier) {
2711     return false;
2712   }
2713 
2714   if (!is_card_mark_membar(child_barrier)) {
2715     // this is the trailing membar and we are done
2716     return true;
2717   }
2718 
2719   // must be sure this card mark feeds a trailing membar
2720   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2721   return (trailing != NULL);
2722 }
2723 
2724 bool unnecessary_volatile(const Node *n)
2725 {
2726   // assert n->is_MemBar();
2727   if (UseBarriersForVolatile) {
2728     // we need to plant a dmb
2729     return false;
2730   }
2731 
2732   MemBarNode *mbvol = n->as_MemBar();
2733 
2734   // first we check if this is part of a card mark. if so then we have
2735   // to generate a StoreLoad barrier
2736 
2737   if (is_card_mark_membar(mbvol)) {
2738       return false;
2739   }
2740 
2741   // ok, if it's not a card mark then we still need to check if it is
2742   // a trailing membar of a volatile put hgraph.
2743 
2744   return (trailing_to_leading(mbvol) != NULL);
2745 }
2746 
2747 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2748 
2749 bool needs_releasing_store(const Node *n)
2750 {
2751   // assert n->is_Store();
2752   if (UseBarriersForVolatile) {
2753     // we use a normal store and dmb combination
2754     return false;
2755   }
2756 
2757   StoreNode *st = n->as_Store();
2758 
2759   // the store must be marked as releasing
2760   if (!st->is_release()) {
2761     return false;
2762   }
2763 
2764   // the store must be fed by a membar
2765 
2766   Node *x = st->lookup(StoreNode::Memory);
2767 
2768   if (! x || !x->is_Proj()) {
2769     return false;
2770   }
2771 
2772   ProjNode *proj = x->as_Proj();
2773 
2774   x = proj->lookup(0);
2775 
2776   if (!x || !x->is_MemBar()) {
2777     return false;
2778   }
2779 
2780   MemBarNode *barrier = x->as_MemBar();
2781 
2782   // if the barrier is a release membar or a cpuorder mmebar fed by a
2783   // release membar then we need to check whether that forms part of a
2784   // volatile put graph.
2785 
2786   // reject invalid candidates
2787   if (!leading_membar(barrier)) {
2788     return false;
2789   }
2790 
2791   // does this lead a normal subgraph?
2792   MemBarNode *mbvol = leading_to_normal(barrier);
2793 
2794   if (!mbvol) {
2795     return false;
2796   }
2797 
2798   // all done unless this is a card mark
2799   if (!is_card_mark_membar(mbvol)) {
2800     return true;
2801   }
2802 
2803   // we found a card mark -- just make sure we have a trailing barrier
2804 
2805   return (card_mark_to_trailing(mbvol) != NULL);
2806 }
2807 
2808 // predicate controlling translation of CAS
2809 //
2810 // returns true if CAS needs to use an acquiring load otherwise false
2811 
bool needs_acquiring_load_exclusive(const Node *n)
{
  // With UseBarriersForVolatile off a CAS always uses an acquiring
  // (ldaxr) load; the ASSERT-only code below just validates that the
  // CAS sits in the expected release/cpuorder ... acquire membar graph.
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2856 
2857 // predicate controlling translation of StoreCM
2858 //
2859 // returns true if a StoreStore must precede the card write otherwise
2860 // false
2861 
2862 bool unnecessary_storestore(const Node *storecm)
2863 {
2864   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2865 
2866   // we only ever need to generate a dmb ishst between an object put
2867   // and the associated card mark when we are using CMS without
2868   // conditional card marking
2869 
2870   if (!UseConcMarkSweepGC || UseCondCardMark) {
2871     return true;
2872   }
2873 
2874   // if we are implementing volatile puts using barriers then the
2875   // object put as an str so we must insert the dmb ishst
2876 
2877   if (UseBarriersForVolatile) {
2878     return false;
2879   }
2880 
2881   // we can omit the dmb ishst if this StoreCM is part of a volatile
2882   // put because in thta case the put will be implemented by stlr
2883   //
2884   // we need to check for a normal subgraph feeding this StoreCM.
2885   // that means the StoreCM must be fed Memory from a leading membar,
2886   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2887   // leading membar must be part of a normal subgraph
2888 
2889   Node *x = storecm->in(StoreNode::Memory);
2890 
2891   if (!x->is_Proj()) {
2892     return false;
2893   }
2894 
2895   x = x->in(0);
2896 
2897   if (!x->is_MemBar()) {
2898     return false;
2899   }
2900 
2901   MemBarNode *leading = x->as_MemBar();
2902 
2903   // reject invalid candidates
2904   if (!leading_membar(leading)) {
2905     return false;
2906   }
2907 
2908   // we can omit the StoreStore if it is the head of a normal subgraph
2909   return (leading_to_normal(leading) != NULL);
2910 }
2911 
2912 
2913 #define __ _masm.
2914 
2915 // advance declarations for helper functions to convert register
2916 // indices to register objects
2917 
2918 // the ad file has to provide implementations of certain methods
2919 // expected by the generic code
2920 //
2921 // REQUIRED FUNCTIONALITY
2922 
2923 //=============================================================================
2924 
2925 // !!!!! Special hack to get all types of calls to specify the byte offset
2926 //       from the start of the call to the point where the return address
2927 //       will point.
2928 
2929 int MachCallStaticJavaNode::ret_addr_offset()
2930 {
2931   // call should be a simple bl
2932   int off = 4;
2933   return off;
2934 }
2935 
// A dynamic call expands to the 4-instruction sequence noted below,
// so the return address is 16 bytes past the start of the call.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
2940 
2941 int MachCallRuntimeNode::ret_addr_offset() {
2942   // for generated stubs the call will be
2943   //   far_call(addr)
2944   // for real runtime callouts it will be six instructions
2945   // see aarch64_enc_java_to_runtime
2946   //   adr(rscratch2, retaddr)
2947   //   lea(rscratch1, RuntimeAddress(addr)
2948   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2949   //   blrt rscratch1
2950   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2951   if (cb) {
2952     return MacroAssembler::far_branch_size();
2953   } else {
2954     return 6 * NativeInstruction::instruction_size;
2955   }
2956 }
2957 
2958 // Indicate if the safepoint node needs the polling page as an input
2959 
2960 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2962 // instruction itself. so we cannot plant a mov of the safepoint poll
2963 // address followed by a load. setting this to true means the mov is
2964 // scheduled as a prior instruction. that's better for scheduling
2965 // anyway.
2966 
// Returning true makes the polling page address an explicit input of
// the safepoint node, so the mov of the page address can be scheduled
// as a separate earlier instruction and the oop data lands exactly on
// the poll load itself (see the comment above).
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
2971 
2972 //=============================================================================
2973 
#ifndef PRODUCT
// Debug listing for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
2979 
// Emit a breakpoint as a brk #0 trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2984 
// Size of the breakpoint node's code, measured generically.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2988 
2989 //=============================================================================
2990 
#ifndef PRODUCT
  // Debug listing for nop padding inserted for loops and calls.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
2996 
2997   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2998     MacroAssembler _masm(&cbuf);
2999     for (int i = 0; i < _count; i++) {
3000       __ nop();
3001     }
3002   }
3003 
  // _count nops, each one instruction wide.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
3007 
3008 //=============================================================================
3009 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
3010 
// Constants are addressed absolutely on AArch64, so the table base
// needs no extra offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
3014 
// MachConstantBaseNode is never postalloc expanded on AArch64.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // unreachable: requires_postalloc_expand() above returns false
  ShouldNotReachHere();
}
3019 
// The constant base node emits no code of its own.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
3023 
// The node emits no code, hence zero size.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
3027 
#ifndef PRODUCT
// Debug listing: the constant base node has an empty encoding.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
3033 
#ifndef PRODUCT
// Debug listing of the prolog: optional stack-bang note followed by
// the frame push sequence matching MachPrologNode::emit.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: immediate sp adjust then store the rfp/lr pair
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push the pair first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
3055 
// Emit the method prolog: a patchable nop, optional stack bang, frame
// build, simulator notification and constant table base setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3091 
// Prolog size depends on bang/frame shape, so measure it generically.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
3097 
// The prolog contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
3102 
3103 //=============================================================================
3104 
#ifndef PRODUCT
// Debug listing of the epilog: frame pop plus (for method compiles) a
// return-poll of the safepoint polling page; mirrors emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore sp via rscratch1 before popping the pair
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
3130 
// Emit the method epilog: frame teardown, simulator notification,
// reserved stack check and the return-point safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // poll-return load of the safepoint polling page
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3150 
// Epilog size varies with frame shape and polling; measure it.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
3155 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
3160 
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
3164 
3165 // This method seems to be obsolete. It is declared in machnode.hpp
3166 // and defined in all *.ad files, but it is never called. Should we
3167 // get rid of it?
// Offset of the safepoint poll within the epilog (see the note above:
// possibly obsolete -- it is never called).
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3172 
3173 //=============================================================================
3174 
3175 // Figure out which register class each belongs in: rc_int, rc_float or
3176 // rc_stack.
3177 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3178 
3179 static enum RC rc_class(OptoReg::Name reg) {
3180 
3181   if (reg == OptoReg::Bad) {
3182     return rc_bad;
3183   }
3184 
3185   // we have 30 int registers * 2 halves
3186   // (rscratch1 and rscratch2 are omitted)
3187 
3188   if (reg < 60) {
3189     return rc_int;
3190   }
3191 
3192   // we have 32 float register * 2 halves
3193   if (reg < 60 + 128) {
3194     return rc_float;
3195   }
3196 
3197   // Between float regs & stack is the flags regs.
3198   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3199 
3200   return rc_stack;
3201 }
3202 
3203 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3204   Compile* C = ra_->C;
3205 
3206   // Get registers to move.
3207   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3208   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3209   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3210   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3211 
3212   enum RC src_hi_rc = rc_class(src_hi);
3213   enum RC src_lo_rc = rc_class(src_lo);
3214   enum RC dst_hi_rc = rc_class(dst_hi);
3215   enum RC dst_lo_rc = rc_class(dst_lo);
3216 
3217   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3218 
3219   if (src_hi != OptoReg::Bad) {
3220     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3221            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3222            "expected aligned-adjacent pairs");
3223   }
3224 
3225   if (src_lo == dst_lo && src_hi == dst_hi) {
3226     return 0;            // Self copy, no move.
3227   }
3228 
3229   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3230               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3231   int src_offset = ra_->reg2offset(src_lo);
3232   int dst_offset = ra_->reg2offset(dst_lo);
3233 
3234   if (bottom_type()->isa_vect() != NULL) {
3235     uint ireg = ideal_reg();
3236     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3237     if (cbuf) {
3238       MacroAssembler _masm(cbuf);
3239       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3240       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3241         // stack->stack
3242         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3243         if (ireg == Op_VecD) {
3244           __ unspill(rscratch1, true, src_offset);
3245           __ spill(rscratch1, true, dst_offset);
3246         } else {
3247           __ spill_copy128(src_offset, dst_offset);
3248         }
3249       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3250         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3251                ireg == Op_VecD ? __ T8B : __ T16B,
3252                as_FloatRegister(Matcher::_regEncode[src_lo]));
3253       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3254         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3255                        ireg == Op_VecD ? __ D : __ Q,
3256                        ra_->reg2offset(dst_lo));
3257       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3258         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3259                        ireg == Op_VecD ? __ D : __ Q,
3260                        ra_->reg2offset(src_lo));
3261       } else {
3262         ShouldNotReachHere();
3263       }
3264     }
3265   } else if (cbuf) {
3266     MacroAssembler _masm(cbuf);
3267     switch (src_lo_rc) {
3268     case rc_int:
3269       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3270         if (is64) {
3271             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3272                    as_Register(Matcher::_regEncode[src_lo]));
3273         } else {
3274             MacroAssembler _masm(cbuf);
3275             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3276                     as_Register(Matcher::_regEncode[src_lo]));
3277         }
3278       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3279         if (is64) {
3280             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3281                      as_Register(Matcher::_regEncode[src_lo]));
3282         } else {
3283             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3284                      as_Register(Matcher::_regEncode[src_lo]));
3285         }
3286       } else {                    // gpr --> stack spill
3287         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3288         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3289       }
3290       break;
3291     case rc_float:
3292       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3293         if (is64) {
3294             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3295                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3296         } else {
3297             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3298                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3299         }
3300       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3301           if (cbuf) {
3302             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3303                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3304         } else {
3305             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3306                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3307         }
3308       } else {                    // fpr --> stack spill
3309         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3310         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3311                  is64 ? __ D : __ S, dst_offset);
3312       }
3313       break;
3314     case rc_stack:
3315       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3316         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3317       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3318         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3319                    is64 ? __ D : __ S, src_offset);
3320       } else {                    // stack --> stack copy
3321         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3322         __ unspill(rscratch1, is64, src_offset);
3323         __ spill(rscratch1, is64, dst_offset);
3324       }
3325       break;
3326     default:
3327       assert(false, "bad rc_class for spill");
3328       ShouldNotReachHere();
3329     }
3330   }
3331 
3332   if (st) {
3333     st->print("spill ");
3334     if (src_lo_rc == rc_stack) {
3335       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3336     } else {
3337       st->print("%s -> ", Matcher::regName[src_lo]);
3338     }
3339     if (dst_lo_rc == rc_stack) {
3340       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3341     } else {
3342       st->print("%s", Matcher::regName[dst_lo]);
3343     }
3344     if (bottom_type()->isa_vect() != NULL) {
3345       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3346     } else {
3347       st->print("\t# spill size = %d", is64 ? 64:32);
3348     }
3349   }
3350 
3351   return 0;
3352 
3353 }
3354 
#ifndef PRODUCT
// Debug listing of a spill copy; with a register allocator available
// delegate to implementation() in format-only mode (cbuf == NULL).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif
3363 
// Emit the spill copy code (no listing: st == NULL).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
3367 
// Spill copy size varies with the move kind; measure it generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3371 
3372 //=============================================================================
3373 
#ifndef PRODUCT
// Debug listing of the box lock address computation (dst = sp + offset).
// NOTE(review): the trailing ']' in the format string has no matching
// '[' and 'rsp' is x86 naming rather than AArch64's sp -- looks like a
// listing-only typo; confirm before changing the string.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif
3382 
3383 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3384   MacroAssembler _masm(&cbuf);
3385 
3386   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3387   int reg    = ra_->get_encode(this);
3388 
3389   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3390     __ add(as_Register(reg), sp, offset);
3391   } else {
3392     ShouldNotReachHere();
3393   }
3394 }
3395 
// A box lock is a single add instruction, i.e. 4 bytes.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3400 
3401 //=============================================================================
3402 
#ifndef PRODUCT
// Debug listing of the unverified entry point: load the receiver's
// klass and compare it against the inline cache expectation.
// NOTE(review): the "# compressed klass" tag also appears on the
// uncompressed (else) branch below -- looks like a copy/paste slip in
// the listing text; confirm before changing the string.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
3419 
3420 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3421 {
3422   // This is the unverified entry point.
3423   MacroAssembler _masm(&cbuf);
3424 
3425   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3426   Label skip;
3427   // TODO
3428   // can we avoid this skip and still use a reloc?
3429   __ br(Assembler::EQ, skip);
3430   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3431   __ bind(skip);
3432 }
3433 
// UEP size varies with the compressed-klass configuration; measure it.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3438 
3439 // REQUIRED EMIT CODE
3440 
3441 //=============================================================================
3442 
3443 // Emit exception handler code.
3444 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3445 {
3446   // mov rscratch1 #exception_blob_entry_point
3447   // br rscratch1
3448   // Note that the code buffer's insts_mark is always relative to insts.
3449   // That's why we must use the macroassembler to generate a handler.
3450   MacroAssembler _masm(&cbuf);
3451   address base = __ start_a_stub(size_exception_handler());
3452   if (base == NULL) {
3453     ciEnv::current()->record_failure("CodeCache is full");
3454     return 0;  // CodeBuffer::expand failed
3455   }
3456   int offset = __ offset();
3457   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3458   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3459   __ end_a_stub();
3460   return offset;
3461 }
3462 
3463 // Emit deopt handler code.
// Generate the deopt handler stub and return its offset, or 0 on
// code-cache exhaustion.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Load lr with the address of this very instruction, then jump to the
  // deopt blob's unpack entry; ordering of these two matters.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3483 
3484 // REQUIRED MATCHER CODE
3485 
3486 //=============================================================================
3487 
3488 const bool Matcher::match_rule_supported(int opcode) {
3489 
3490   switch (opcode) {
3491   default:
3492     break;
3493   }
3494 
3495   if (!has_match_rule(opcode)) {
3496     return false;
3497   }
3498 
3499   return true;  // Per default match rules are supported.
3500 }
3501 
3502 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3503 
3504   // TODO
3505   // identify extra cases that we might want to provide match rules for
3506   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3507   bool ret_value = match_rule_supported(opcode);
3508   // Add rules here.
3509 
3510   return ret_value;  // Per default match rules are supported.
3511 }
3512 
// No predicated (masked) vector operations are supported here.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
3516 
// Use the default float register-pressure threshold unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
3520 
// Not used on this platform — aborts if ever reached.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3526 
3527 // Is this branch offset short enough that a short branch can be used?
3528 //
3529 // NOTE: If the platform does not provide any short branch variants, then
3530 //       this method should return false for offset 0.
3531 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3532   // The passed offset is relative to address of the branch.
3533 
3534   return (-32768 <= offset && offset < 32768);
3535 }
3536 
// True iff a single (StoreL ConL) is preferable to two (StoreI ConI).
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
3542 
// true just means we have fast l2f conversion (long-to-float is a
// single instruction sequence, no helper call required).
const bool Matcher::convL2FSupported(void) {
  return true;
}
3547 
3548 // Vector width in bytes.
3549 const int Matcher::vector_width_in_bytes(BasicType bt) {
3550   int size = MIN2(16,(int)MaxVectorSize);
3551   // Minimum 2 values in vector
3552   if (size < 2*type2aelembytes(bt)) size = 0;
3553   // But never < 4
3554   if (size < 4) size = 0;
3555   return size;
3556 }
3557 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  // widest vector in bytes divided by the element size
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3562 const int Matcher::min_vector_size(const BasicType bt) {
3563 //  For the moment limit the vector size to 8 bytes
3564     int size = 8 / type2aelembytes(bt);
3565     if (size < 2) size = 2;
3566     return size;
3567 }
3568 
3569 // Vector ideal reg.
3570 const uint Matcher::vector_ideal_reg(int len) {
3571   switch(len) {
3572     case  8: return Op_VecD;
3573     case 16: return Op_VecX;
3574   }
3575   ShouldNotReachHere();
3576   return 0;
3577 }
3578 
// Vector shift counts always live in a full 128-bit (X) vector
// register, regardless of the requested size.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3582 
// AES support not yet implemented: the intrinsics do not need the
// original (unexpanded) key passed along.
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3587 
// Misaligned vector stores/loads are allowed unless disabled via the
// AlignVector flag.  (Previous comment said "x86"; this is the AArch64 file.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3592 
// false => the array-init count gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;
3595 
// Use conditional move (CMOVL): zero extra cost.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
3601 
// Conditional float moves: zero extra cost.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3606 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
3613 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only allow complex addresses when the compressed-oop shift is zero.
  return Universe::narrow_oop_shift() == 0;
}
3627 
// See narrow_oop_use_complex_address above; same question for
// compressed klass pointers.  Currently disabled.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3633 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode
  // (i.e. when there is no heap base to add).
  return Universe::narrow_oop_base() == NULL;
}
3638 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass
  // mode (i.e. when there is no klass base to add).
  return Universe::narrow_klass_base() == NULL;
}
3643 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
3656 
// Never expected to be called on this platform — aborts if reached.
// (The previous "No-op on amd64" comment was stale: the body is
// Unimplemented(), not a no-op.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3661 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  Not needed on this platform.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
3665 
// Are floats converted to double when stored to stack during
// deoptimization?  No: floats are stored as floats.
bool Matcher::float_in_double() { return false; }
3669 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3675 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
// The Java argument registers here are r0-r7 and v0-v7, including the
// high halves of each.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
3701 
// A register is spillable as an argument iff it can carry a Java argument.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3706 
// Do not use a special hand-written assembler sequence for long
// division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3710 
// Register for DIVI projection of divmodI.
// Not used on this platform — aborts if reached.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3715 
// Register for MODI projection of divmodI.
// Not used on this platform — aborts if reached.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3721 
// Register for DIVL projection of divmodL.
// Not used on this platform — aborts if reached.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3727 
// Register for MODL projection of divmodL.
// Not used on this platform — aborts if reached.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3733 
// Mask of the register used to preserve SP across a method-handle
// invoke: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3737 
3738 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3739   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3740     Node* u = addp->fast_out(i);
3741     if (u->is_Mem()) {
3742       int opsize = u->as_Mem()->memory_size();
3743       assert(opsize > 0, "unexpected memory operand size");
3744       if (u->as_Mem()->memory_size() != (1<<shift)) {
3745         return false;
3746       }
3747     }
3748   }
3749   return true;
3750 }
3751 
// ConvI2L nodes do not require an explicit type on this platform.
const bool Matcher::convi2l_type_required = false;
3753 
3754 // Should the Matcher clone shifts on addressing modes, expecting them
3755 // to be subsumed into complex addressing expressions or compute them
3756 // into registers?
// Clone address-forming subtrees into each memory user so they can be
// subsumed into complex addressing modes.  Returns true if the AddP was
// handled.  The exact order of mstack pushes and address_visited marks
// below is load-bearing — do not reorder.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case (base + (x << con)): fold the constant-shifted (possibly
  // i2l-converted) index into the address, provided every memory user
  // accepts the scaled form and the shift has no other uses.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit); // shift count is consumed by the address
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Case (base + i2l(x)): subsume the sign extension into the address.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3794 
// Platform hook for reshaping AddP chains; intentionally empty here.
void Compile::reshape_address(AddPNode* addp) {
}
3797 
3798 // helper for encoding java_to_runtime calls on sim
3799 //
3800 // this is needed to compute the extra arguments required when
3801 // planting a call to the simulator blrt instruction. the TypeFunc
3802 // can be queried to identify the counts for integral, and floating
3803 // arguments and the return type
3804 
// Compute, for a java_to_runtime call on the simulator, the number of
// general-purpose args (gpcnt), floating-point args (fpcnt), and the
// return-type code (rtype) expected by the blrt instruction encoding.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here — float/double args fall through and
      // are ALSO counted in gps, making gps the total argument count.
      // Verify against the simulator blrt ABI whether this double
      // counting is intended or a missing `break`.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  // Every arm below breaks, so the unusual default-before-case order
  // is harmless.
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3839 
// Emit a volatile load/store of REG at [BASE] using INSN (ldar*/stlr*).
// Volatile accesses only support base-register addressing, hence the
// guarantees on INDEX/SCALE/DISP.
// NOTE: _masm is deliberately declared OUTSIDE the braces (and the macro
// is not do{...}while(0)-wrapped) so that enc_classes can issue further
// `__` instructions after invoking the macro.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3848 
// Member-function-pointer types for the loadStore helpers below:
// integer, float, and vector load/store members of MacroAssembler.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3853 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // int index converted to long: sign-extend (sxtw) while scaling
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // no index register: base + displacement form
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // base + scaled-register form; displacement must be folded away
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3884 
  // Float/double variant of loadStore above; see that function for the
  // opcode-based sign-extension kludge.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // int index converted to long: sign-extend (sxtw) while scaling
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      // no index register: base + displacement form
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // base + scaled-register form; displacement must be folded away
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3907 
  // Vector variant of loadStore: index is always lsl-scaled (no
  // sign-extended index forms for vector accesses).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3919 
3920 %}
3921 
3922 
3923 
3924 //----------ENCODING BLOCK-----------------------------------------------------
3925 // This block specifies the encoding classes used by the compiler to
3926 // output byte streams.  Encoding classes are parameterized macros
3927 // used by Machine Instruction Nodes in order to generate the bit
3928 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are currently
// supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3932 // which returns its register number when queried.  CONST_INTER causes
3933 // an operand to generate a function which returns the value of the
3934 // constant when queried.  MEMORY_INTER causes an operand to generate
3935 // four functions which return the Base Register, the Index Register,
3936 // the Scale Value, and the Offset Value of the operand when queried.
3937 // COND_INTER causes an operand to generate six functions which return
3938 // the encoding code (ie - encoding bits for the instruction)
3939 // associated with each basic boolean condition for a conditional
3940 // instruction.
3941 //
3942 // Instructions specify two basic values for encoding.  Again, a
3943 // function is available to check if the constant displacement is an
3944 // oop. They use the ins_encode keyword to specify their encoding
3945 // classes (which must be a sequence of enc_class names, and their
3946 // parameters, specified in the encoding block), and they use the
3947 // opcode keyword to specify, in order, their primary, secondary, and
3948 // tertiary opcode.  Only the opcode sections which a particular
3949 // instruction needs for encoding need to be specified.
3950 encode %{
3951   // Build emit functions for each basic byte or larger field in the
3952   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3953   // from C++ code in the enc_class source block.  Emit functions will
3954   // live in the main source block for now.  In future, we can
3955   // generalize this by adding a syntax that specifies the sizes of
3956   // fields in an order, so that the adlc can build the emit functions
3957   // automagically
3958 
3959   // catch all for unimplemented encodings
3960   enc_class enc_unimplemented %{
3961     MacroAssembler _masm(&cbuf);
3962     __ unimplemented("C2 catch all");
3963   %}
3964 
3965   // BEGIN Non-volatile memory access
3966 
3967   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
3968     Register dst_reg = as_Register($dst$$reg);
3969     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
3970                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3971   %}
3972 
3973   enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
3974     Register dst_reg = as_Register($dst$$reg);
3975     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
3976                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3977   %}
3978 
3979   enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
3980     Register dst_reg = as_Register($dst$$reg);
3981     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3982                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3983   %}
3984 
3985   enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
3986     Register dst_reg = as_Register($dst$$reg);
3987     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3988                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3989   %}
3990 
3991   enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
3992     Register dst_reg = as_Register($dst$$reg);
3993     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
3994                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3995   %}
3996 
3997   enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
3998     Register dst_reg = as_Register($dst$$reg);
3999     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
4000                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4001   %}
4002 
4003   enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
4004     Register dst_reg = as_Register($dst$$reg);
4005     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
4006                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4007   %}
4008 
4009   enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
4010     Register dst_reg = as_Register($dst$$reg);
4011     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
4012                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4013   %}
4014 
4015   enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
4016     Register dst_reg = as_Register($dst$$reg);
4017     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
4018                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4019   %}
4020 
4021   enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
4022     Register dst_reg = as_Register($dst$$reg);
4023     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
4024                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4025   %}
4026 
4027   enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
4028     Register dst_reg = as_Register($dst$$reg);
4029     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
4030                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4031   %}
4032 
4033   enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
4034     Register dst_reg = as_Register($dst$$reg);
4035     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
4036                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4037   %}
4038 
4039   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
4040     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
4041     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
4042                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4043   %}
4044 
4045   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
4046     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
4047     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
4048                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4049   %}
4050 
4051   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
4052     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
4053     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
4054        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4055   %}
4056 
4057   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
4058     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
4059     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
4060        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4061   %}
4062 
4063   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
4064     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
4065     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
4066        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4067   %}
4068 
4069   enc_class aarch64_enc_strb(iRegI src, memory mem) %{
4070     Register src_reg = as_Register($src$$reg);
4071     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
4072                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4073   %}
4074 
4075   enc_class aarch64_enc_strb0(memory mem) %{
4076     MacroAssembler _masm(&cbuf);
4077     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
4078                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4079   %}
4080 
4081   enc_class aarch64_enc_strb0_ordered(memory mem) %{
4082     MacroAssembler _masm(&cbuf);
4083     __ membar(Assembler::StoreStore);
4084     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
4085                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4086   %}
4087 
4088   enc_class aarch64_enc_strh(iRegI src, memory mem) %{
4089     Register src_reg = as_Register($src$$reg);
4090     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
4091                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4092   %}
4093 
4094   enc_class aarch64_enc_strh0(memory mem) %{
4095     MacroAssembler _masm(&cbuf);
4096     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
4097                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4098   %}
4099 
4100   enc_class aarch64_enc_strw(iRegI src, memory mem) %{
4101     Register src_reg = as_Register($src$$reg);
4102     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
4103                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4104   %}
4105 
4106   enc_class aarch64_enc_strw0(memory mem) %{
4107     MacroAssembler _masm(&cbuf);
4108     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
4109                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4110   %}
4111 
4112   enc_class aarch64_enc_str(iRegL src, memory mem) %{
4113     Register src_reg = as_Register($src$$reg);
4114     // we sometimes get asked to store the stack pointer into the
4115     // current thread -- we cannot do that directly on AArch64
4116     if (src_reg == r31_sp) {
4117       MacroAssembler _masm(&cbuf);
4118       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4119       __ mov(rscratch2, sp);
4120       src_reg = rscratch2;
4121     }
4122     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
4123                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4124   %}
4125 
4126   enc_class aarch64_enc_str0(memory mem) %{
4127     MacroAssembler _masm(&cbuf);
4128     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
4129                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4130   %}
4131 
4132   enc_class aarch64_enc_strs(vRegF src, memory mem) %{
4133     FloatRegister src_reg = as_FloatRegister($src$$reg);
4134     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
4135                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4136   %}
4137 
4138   enc_class aarch64_enc_strd(vRegD src, memory mem) %{
4139     FloatRegister src_reg = as_FloatRegister($src$$reg);
4140     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
4141                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4142   %}
4143 
4144   enc_class aarch64_enc_strvS(vecD src, memory mem) %{
4145     FloatRegister src_reg = as_FloatRegister($src$$reg);
4146     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
4147        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4148   %}
4149 
4150   enc_class aarch64_enc_strvD(vecD src, memory mem) %{
4151     FloatRegister src_reg = as_FloatRegister($src$$reg);
4152     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
4153        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4154   %}
4155 
4156   enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
4157     FloatRegister src_reg = as_FloatRegister($src$$reg);
4158     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
4159        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4160   %}
4161 
4162   // END Non-volatile memory access
4163 
4164   // volatile loads and stores
4165 
4166   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4167     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4168                  rscratch1, stlrb);
4169   %}
4170 
4171   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4172     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4173                  rscratch1, stlrh);
4174   %}
4175 
4176   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4177     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4178                  rscratch1, stlrw);
4179   %}
4180 
4181 
  // Volatile (acquiring) load encodings.  Each variant emits a
  // load-acquire (ldarb/ldarh/ldarw/ldar) through the MOV_VOLATILE
  // helper, which uses rscratch1 for any address arithmetic.  The
  // signed sub-word variants sign-extend the loaded value afterwards.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended, into a 32-bit register.
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended, into a 64-bit register.
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended, into a 32-bit register.
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended, into a 64-bit register.
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire 32-bit word.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 32-bit word into a long register.
  // NOTE(review): duplicate enc_class name -- same as the iRegI variant
  // above, differing only in the operand type.  Presumably ADLC accepts
  // this; confirm which definition the instructs actually bind to.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
4244 
  // Volatile float load: load-acquire the 32-bit pattern into rscratch1,
  // then bitwise-move it into the destination FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: load-acquire 64 bits into rscratch1, then
  // bitwise-move into the destination FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4256 
  // Volatile 64-bit store (store-release).  If the source is the stack
  // pointer it is first copied into rscratch2, since sp cannot be used
  // directly as the data operand here.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4270 
  // Volatile float store: move the FP bit pattern into rscratch2, then
  // store-release it as a 32-bit word.  The braces scope _masm to the
  // fmov only.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: as above, but 64 bits wide.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4290 
4291   // synchronized read/update encodings
4292 
  // Exclusive load-acquire of a 64-bit value.  ldaxr only accepts a bare
  // base register, so any index/displacement is first folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // index == -1 encodes "no index register"
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // both displacement and scaled index: fold in two steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4321 
  // Exclusive store-release of a 64-bit value.  The store status goes
  // into rscratch1 (0 = success); rscratch2 holds any folded address.
  // The final cmpw turns the status into condition flags (EQ on
  // success) for the consuming branch.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // index == -1 encodes "no index register"
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // both displacement and scaled index: fold in two steps
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4351 
  // Compare-and-swap encodings, one per operand size.  No acquire on the
  // load side, release on the store side.  The matcher only hands us a
  // bare base address (no index, no displacement), which the guarantee
  // checks.  The outcome is reflected in the condition flags, consumed
  // e.g. by aarch64_enc_cset_eq.

  // 64-bit CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4383 
4384 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit acquiring CAS.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit acquiring CAS.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4404 
4405 
4406   // auxiliary used for CompareAndSwapX to set result register
4407   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4408     MacroAssembler _masm(&cbuf);
4409     Register res_reg = as_Register($res$$reg);
4410     __ cset(res_reg, Assembler::EQ);
4411   %}
4412 
  // prefetch encodings

  // Prefetch for store, PSTL1KEEP (L1, retain).  When both an index and
  // a displacement are present, the displacement is folded into
  // rscratch1 first, because a single prfm address mode cannot carry
  // both.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // index == -1 encodes "no index register"
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4433 
  // mov encodings
4435 
4436   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4437     MacroAssembler _masm(&cbuf);
4438     u_int32_t con = (u_int32_t)$src$$constant;
4439     Register dst_reg = as_Register($dst$$reg);
4440     if (con == 0) {
4441       __ movw(dst_reg, zr);
4442     } else {
4443       __ movw(dst_reg, con);
4444     }
4445   %}
4446 
4447   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4448     MacroAssembler _masm(&cbuf);
4449     Register dst_reg = as_Register($dst$$reg);
4450     u_int64_t con = (u_int64_t)$src$$constant;
4451     if (con == 0) {
4452       __ mov(dst_reg, zr);
4453     } else {
4454       __ mov(dst_reg, con);
4455     }
4456   %}
4457 
  // Materialize a pointer constant.  Oop and metadata constants go
  // through the relocation-aware movoop/mov_metadata paths; plain
  // addresses below the first page boundary are moved directly, and
  // anything else uses adrp+add.  NULL and (address)1 are matched by
  // the dedicated mov_p0/mov_p1 encodings, so reaching them here is a
  // matcher bug.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4482 
4483   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4484     MacroAssembler _masm(&cbuf);
4485     Register dst_reg = as_Register($dst$$reg);
4486     __ mov(dst_reg, zr);
4487   %}
4488 
4489   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4490     MacroAssembler _masm(&cbuf);
4491     Register dst_reg = as_Register($dst$$reg);
4492     __ mov(dst_reg, (u_int64_t)1);
4493   %}
4494 
  // Materialize the polling page address with a poll_type relocation.
  // The assert checks that adrp left no in-page offset, as expected for
  // a page-aligned address.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base via the MacroAssembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4508 
  // Materialize a narrow (compressed) oop constant.  Must carry an oop
  // relocation; the zero case is handled by mov_n0 below, so NULL here
  // is a matcher bug.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Materialize the narrow null constant.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Materialize a narrow (compressed) klass constant; requires a
  // metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4540 
4541   // arithmetic encodings
4542 
  // Add/subtract an immediate, 32-bit form.  The instruct's primary
  // opcode selects the operation (add has primary == 0, subtract has
  // primary == 1, implemented by negating the constant).  A constant
  // that ends up negative is then emitted as the opposite operation on
  // its negation so the immediate stays encodable.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Add/subtract an immediate, 64-bit form.  Same primary-opcode scheme
  // as the 32-bit variant.
  // NOTE(review): the constant is truncated to int32_t -- presumably the
  // immLAddSub operand guarantees it fits; confirm the operand range.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4570 
  // Integer divide/modulo encodings.  corrected_idiv{l,q} handle the
  // min-int / -1 overflow case; the boolean argument selects remainder
  // (true) versus quotient (false), as the div/mod pairing below shows.

  // 32-bit quotient.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit quotient.
  // NOTE(review): the operand list says iRegI but this emits the 64-bit
  // divide -- presumably the enc_class operand types are informational
  // only; confirm against the instructs that use this encoding.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder (same operand-type caveat as aarch64_enc_div).
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4602 
  // compare instruction encodings

  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: only the
  // flags are wanted, so subtract/add into zr, flipping the operation
  // for negative constants to keep the immediate encodable.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.  val == -val
  // detects Long.MIN_VALUE, which cannot be negated.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit width).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit width).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4686 
4687   enc_class aarch64_enc_b(label lbl) %{
4688     MacroAssembler _masm(&cbuf);
4689     Label *L = $lbl$$label;
4690     __ b(*L);
4691   %}
4692 
4693   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4694     MacroAssembler _masm(&cbuf);
4695     Label *L = $lbl$$label;
4696     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4697   %}
4698 
4699   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4700     MacroAssembler _masm(&cbuf);
4701     Label *L = $lbl$$label;
4702     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4703   %}
4704 
  // Slow-path partial subtype check via the shared
  // check_klass_subtype_slow_path helper, with set_cond_codes so the
  // flags reflect the outcome.  When primary is set, the result
  // register is additionally zeroed on the success path before the
  // miss label.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4722 
  // Static/special Java call.  Runtime wrappers (_method == NULL) get a
  // plain runtime-call trampoline; real Java targets get an
  // opt_virtual or static call relocation plus a to-interpreter stub.
  // Both trampoline and stub emission can fail when the code cache is
  // full, in which case we record the failure and bail out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4749 
  // Dynamic (virtual/interface) Java call through an inline cache; the
  // method index ties the relocation to the resolved method.  ic_call
  // returns NULL when the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call epilog: stack-depth verification is not implemented on
  // AArch64, so VerifyStackAtCalls just emits an Unimplemented trap.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4767 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // In-code-cache target: reachable via a trampoline call; may fail
      // when the code cache is full.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Out-of-cache target: indirect call with the signature info
      // (gp/fp arg counts, return type) required by blrt.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // (two-word frame holding the return address, popped after the call)
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4798 
4799   enc_class aarch64_enc_rethrow() %{
4800     MacroAssembler _masm(&cbuf);
4801     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
4802   %}
4803 
4804   enc_class aarch64_enc_ret() %{
4805     MacroAssembler _masm(&cbuf);
4806     __ ret(lr);
4807   %}
4808 
4809   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4810     MacroAssembler _masm(&cbuf);
4811     Register target_reg = as_Register($jump_target$$reg);
4812     __ br(target_reg);
4813   %}
4814 
4815   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4816     MacroAssembler _masm(&cbuf);
4817     Register target_reg = as_Register($jump_target$$reg);
4818     // exception oop should be in r0
4819     // ret addr has been popped into lr
4820     // callee expects it in r3
4821     __ mov(r3, lr);
4822     __ br(target_reg);
4823   %}
4824 
  // Fast-path monitor enter.  On exit, flag == EQ means the lock was
  // acquired (biased, thin, or recursive); flag == NE sends the caller
  // to the slow path.  disp_hdr and tmp are clobbered as scratch.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this sets NE and forces the slow path
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    // Expected value is the unlocked mark (disp_hdr), new value is box.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4979 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching

  // Fast-path monitor exit.  On exit, flag == EQ means the lock was
  // released (biased, recursive, or thin); flag == NE sends the caller
  // to the slow path.  disp_hdr and tmp are clobbered as scratch.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      // Expected value is box (our stack lock), new value restores the
      // displaced header; release-only CAS (casl/stlxr) suffices here.
      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5078 
5079 %}
5080 
5081 //----------FRAME--------------------------------------------------------------
5082 // Definition of frame structure and management information.
5083 //
5084 //  S T A C K   L A Y O U T    Allocators stack-slot number
5085 //                             |   (to get allocators register number
5086 //  G  Owned by    |        |  v    add OptoReg::stack0())
5087 //  r   CALLER     |        |
5088 //  o     |        +--------+      pad to even-align allocators stack-slot
5089 //  w     V        |  pad0  |        numbers; owned by CALLER
5090 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5091 //  h     ^        |   in   |  5
5092 //        |        |  args  |  4   Holes in incoming args owned by SELF
5093 //  |     |        |        |  3
5094 //  |     |        +--------+
5095 //  V     |        | old out|      Empty on Intel, window on Sparc
5096 //        |    old |preserve|      Must be even aligned.
5097 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5098 //        |        |   in   |  3   area for Intel ret address
5099 //     Owned by    |preserve|      Empty on Sparc.
5100 //       SELF      +--------+
5101 //        |        |  pad2  |  2   pad to align old SP
5102 //        |        +--------+  1
5103 //        |        | locks  |  0
5104 //        |        +--------+----> OptoReg::stack0(), even aligned
5105 //        |        |  pad1  | 11   pad to align new SP
5106 //        |        +--------+
5107 //        |        |        | 10
5108 //        |        | spills |  9   spills
5109 //        V        |        |  8   (pad0 slot for callee)
5110 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5111 //        ^        |  out   |  7
5112 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5113 //     Owned by    +--------+
5114 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5115 //        |    new |preserve|      Must be even-aligned.
5116 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5117 //        |        |        |
5118 //
5119 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5120 //         known from SELF's arguments and the Java calling convention.
5121 //         Region 6-7 is determined per call site.
5122 // Note 2: If the calling convention leaves holes in the incoming argument
5123 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5125 //         incoming area, as the Java calling convention is completely under
5126 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5128 //         varargs C calling conventions.
5129 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5130 //         even aligned with pad0 as needed.
5131 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5132 //           (the latter is true on Intel but is it false on AArch64?)
5133 //         region 6-11 is even aligned; it may be padded out more so that
5134 //         the region from SP to FP meets the minimum stack alignment.
5135 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5136 //         alignment.  Region 11, pad1, may be dynamically extended so that
5137 //         SP meets the minimum alignment.
5138 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return-value register pair, indexed by the ideal
    // register opcode: integer/pointer/long results in R0, float/double in V0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad marks values that occupy a
    // single 32-bit slot and therefore have no second half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5242 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
// Defaults; individual operand/instruction definitions may override these.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5260 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5332 
// 32 bit integer constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5442 
// 64 bit integer constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits (2^k - 1) with the
// top two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits (2^k - 1) with the
// top two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5494 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long constant variant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5548 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an immediate access with shift 2 (4 byte accesses)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an immediate access with shift 3 (8 byte accesses)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an immediate access with shift 4 (16 byte accesses)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant variant of immIOffset
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant variant of immIOffset4
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant variant of immIOffset8
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant variant of immIOffset16
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5629 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5738 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5820 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant encodable as an FP instruction packed immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant encodable as an FP instruction packed immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5912 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
5956 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6073 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6128 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6173 
6174 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6234 
// Float Register
// Float register operands -- single-precision value in an FP/SIMD register
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands -- double-precision value in an FP/SIMD register
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q-sized) vector operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6278 
// Double operands pinned to specific FP/SIMD registers v0..v3.
// Presumably used by rules that call runtime stubs with fixed-register
// conventions -- confirm against the instruct definitions that use them.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6314 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same physical flags; a distinct operand so rules can select the
// unsigned condition-code mnemonics via cmpOpU)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6354 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (old comment said link_reg -- copy/paste from lr_RegP)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6396 
//----------Memory Operands----------------------------------------------------

// Plain register-indirect address: [base]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (sxtw(int index) << scale)] -- the ConvI2L is folded into the
// addressing mode. The predicate ensures the scaled form is legal for
// every memory user of this AddP.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + sxtw(int index)] -- unscaled, int index widened for free
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + long index] -- unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6470 
// Base-plus-immediate-offset addressing modes: [base, #off].
// The variants differ only in the immediate operand class, which
// constrains the offset to what the corresponding access size accepts
// (immIOffset4/8/16, immLoffset4/8/16 -- see those operand definitions).
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with an int offset valid for 4-byte accesses
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with an int offset valid for 8-byte accesses
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with an int offset valid for 16-byte accesses
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with a long offset
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with a long offset valid for 4-byte accesses
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with a long offset valid for 8-byte accesses
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] with a long offset valid for 16-byte accesses
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6582 
// Narrow-oop (compressed pointer) addressing modes. All of these
// require Universe::narrow_oop_shift() == 0 so that DecodeN is a
// plain register-width base and can be folded into the address.

// [decoded narrow base]
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + (sxtw(int index) << scale)]
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [narrow base + (long index << scale)]
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [narrow base + sxtw(int index)]
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + long index]
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base, #int off]
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base, #long off]
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6687 
6688 
6689 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// Address of the saved-pc slot: [thread_reg, #immL_pc_off].
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6704 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): base(0x1e) is the stack-pointer encoding; the "// RSP"
// comments are x86-legacy naming carried over -- confirm against the
// register definitions at the top of this file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6779 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings for the
// signed mnemonics shown (lt/ge/le/gt plus eq/ne/vs/vc).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// Same as cmpOp but the inequality cases map to the unsigned
// condition codes (lo/hs/ls/hi).

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6835 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted to eq/ne tests via the predicate below.

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted to lt/ge tests (sign-bit tests) via the predicate below.

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted to eq/ne/lt/ge tests via the predicate below.

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6911 
// Special operand allowing long args to int ops to be truncated for free
// Matches (ConvL2I reg) so the truncation is absorbed into the consuming
// 32-bit instruction instead of emitting a separate l2i/movw.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // Terminating ';' added for consistency: every other operand in this
  // file writes "interface(REG_INTER);" (ADLC accepts both forms).
  interface(REG_INTER);
%}
6924 
// Memory opclasses for vector loads/stores: restricted to addressing
// modes whose immediate offsets suit 4-, 8- and 16-byte accesses
// (indOffI4/indOffL4 etc.) plus the offset-free forms.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6956 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the named A53-style stages onto the generic S0..S5 stages of the
// pipe_desc declared below, so pipe_class rules can use either naming.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6966 
6967 // Integer ALU reg operation
6968 pipeline %{
6969 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6982 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 are the two issue slots (INS01 = either slot); ALU0/ALU1
// the integer ALUs; MAC multiply-accumulate; DIV divider; BRANCH,
// LDST load/store, and NEON_FP the SIMD/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
7003 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP two-operand arithmetic, single precision: sources read in S1/S2,
// result in S5; can issue in either slot (INS01); uses the NEON/FP unit.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand arithmetic, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP conversions (all share the same read-S1 / write-S5 timing):
// double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7135 
// FP divide, single precision. Note INS0 only: restricted to issue
// slot 0, unlike the dual-issue (INS01) arithmetic classes above.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision (issue slot 0 only)
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags (cr)
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP load of a constant-pool value, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP load of a constant-pool value, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
7209 
// Vector integer pipe classes. Convention throughout: the 64-bit (vecD)
// form dual-issues (INS01) while the 128-bit (vecX) form is restricted
// to issue slot 0 (INS0).

// 64-bit vector multiply
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate: dst is also read (accumulator)
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector integer dyadic op (add/sub etc.)
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector integer dyadic op
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op (and/or/xor)
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by a shift-count vector
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by a shift-count vector
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate (shift operand not a pipeline input)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7329 
// Vector floating-point pipe classes (same 64-bit=INS01 / 128-bit=INS0
// issue convention; note vmuldiv_fp64 is also INS0-only).

// 64-bit vector FP dyadic op
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP dyadic op
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide (issue slot 0 only)
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP unary op
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP unary op
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7396 
// Vector duplicate (broadcast) from a general-purpose register, 64-bit
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from a general-purpose register, 128-bit
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from a float register, 64-bit
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from a float register, 128-bit
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from a double register, 128-bit
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit (no source operands)
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7457 
7458 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
7459 %{
7460   single_instruction;
7461   dst    : S5(write);
7462   mem    : ISS(read);
7463   INS01  : ISS;
7464   NEON_FP : S3;
7465 %}
7466 
7467 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
7468 %{
7469   single_instruction;
7470   dst    : S5(write);
7471   mem    : ISS(read);
7472   INS01  : ISS;
7473   NEON_FP : S3;
7474 %}
7475 
7476 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
7477 %{
7478   single_instruction;
7479   mem    : ISS(read);
7480   src    : S2(read);
7481   INS01  : ISS;
7482   NEON_FP : S3;
7483 %}
7484 
// 128-bit vector store.
// Fix: the source operand of the 128-bit variant is a vecX, not a vecD —
// the vecD here was a copy-paste from vstore_reg_mem64 (compare
// vload_reg_mem128, which correctly takes vecX).
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7493 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is claimed at
// EX1, while the comment says the result is generated in EX2 — confirm
// which stage is intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7591 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSINC   X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7656 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7735 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Here 'dst' is the index/address register (read at issue), not a result.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7803 
//------- Branch pipeline operations ----------------------
// (header previously said "Store pipeline operations" — copy-paste)

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7856 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}

%}
7898 //----------INSTRUCTIONS-------------------------------------------------------
7899 //
7900 // match      -- States which machine-independent subtree may be replaced
7901 //               by this instruction.
7902 // ins_cost   -- The estimated cost of this instruction is used by instruction
7903 //               selection to identify a minimum cost tree of machine
7904 //               instructions that matches a tree of machine-independent
7905 //               instructions.
7906 // format     -- A string providing the disassembly for this instruction.
7907 //               The value of an instruction's operand may be inserted
7908 //               by referring to it with a '$' prefix.
7909 // opcode     -- Three instruction opcodes may be provided.  These are referred
7910 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7912 //               indicate the type of machine instruction, while secondary
7913 //               and tertiary are often used for prefix options or addressing
7914 //               modes.
7915 // ins_encode -- A list of encode classes with parameters. The encode class
7916 //               name must have been defined in an 'enc_class' specification
7917 //               in the encode section of the architecture description.
7918 
7919 // ============================================================================
7920 // Memory (Load/Store) Instructions
7921 
7922 // Load Instructions
7923 
7924 // Load Byte (8 bit signed)
7925 instruct loadB(iRegINoSp dst, memory mem)
7926 %{
7927   match(Set dst (LoadB mem));
7928   predicate(!needs_acquiring_load(n));
7929 
7930   ins_cost(4 * INSN_COST);
7931   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7932 
7933   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7934 
7935   ins_pipe(iload_reg_mem);
7936 %}
7937 
7938 // Load Byte (8 bit signed) into long
7939 instruct loadB2L(iRegLNoSp dst, memory mem)
7940 %{
7941   match(Set dst (ConvI2L (LoadB mem)));
7942   predicate(!needs_acquiring_load(n->in(1)));
7943 
7944   ins_cost(4 * INSN_COST);
7945   format %{ "ldrsb  $dst, $mem\t# byte" %}
7946 
7947   ins_encode(aarch64_enc_ldrsb(dst, mem));
7948 
7949   ins_pipe(iload_reg_mem);
7950 %}
7951 
7952 // Load Byte (8 bit unsigned)
7953 instruct loadUB(iRegINoSp dst, memory mem)
7954 %{
7955   match(Set dst (LoadUB mem));
7956   predicate(!needs_acquiring_load(n));
7957 
7958   ins_cost(4 * INSN_COST);
7959   format %{ "ldrbw  $dst, $mem\t# byte" %}
7960 
7961   ins_encode(aarch64_enc_ldrb(dst, mem));
7962 
7963   ins_pipe(iload_reg_mem);
7964 %}
7965 
7966 // Load Byte (8 bit unsigned) into long
7967 instruct loadUB2L(iRegLNoSp dst, memory mem)
7968 %{
7969   match(Set dst (ConvI2L (LoadUB mem)));
7970   predicate(!needs_acquiring_load(n->in(1)));
7971 
7972   ins_cost(4 * INSN_COST);
7973   format %{ "ldrb  $dst, $mem\t# byte" %}
7974 
7975   ins_encode(aarch64_enc_ldrb(dst, mem));
7976 
7977   ins_pipe(iload_reg_mem);
7978 %}
7979 
7980 // Load Short (16 bit signed)
7981 instruct loadS(iRegINoSp dst, memory mem)
7982 %{
7983   match(Set dst (LoadS mem));
7984   predicate(!needs_acquiring_load(n));
7985 
7986   ins_cost(4 * INSN_COST);
7987   format %{ "ldrshw  $dst, $mem\t# short" %}
7988 
7989   ins_encode(aarch64_enc_ldrshw(dst, mem));
7990 
7991   ins_pipe(iload_reg_mem);
7992 %}
7993 
7994 // Load Short (16 bit signed) into long
7995 instruct loadS2L(iRegLNoSp dst, memory mem)
7996 %{
7997   match(Set dst (ConvI2L (LoadS mem)));
7998   predicate(!needs_acquiring_load(n->in(1)));
7999 
8000   ins_cost(4 * INSN_COST);
8001   format %{ "ldrsh  $dst, $mem\t# short" %}
8002 
8003   ins_encode(aarch64_enc_ldrsh(dst, mem));
8004 
8005   ins_pipe(iload_reg_mem);
8006 %}
8007 
8008 // Load Char (16 bit unsigned)
8009 instruct loadUS(iRegINoSp dst, memory mem)
8010 %{
8011   match(Set dst (LoadUS mem));
8012   predicate(!needs_acquiring_load(n));
8013 
8014   ins_cost(4 * INSN_COST);
8015   format %{ "ldrh  $dst, $mem\t# short" %}
8016 
8017   ins_encode(aarch64_enc_ldrh(dst, mem));
8018 
8019   ins_pipe(iload_reg_mem);
8020 %}
8021 
8022 // Load Short/Char (16 bit unsigned) into long
8023 instruct loadUS2L(iRegLNoSp dst, memory mem)
8024 %{
8025   match(Set dst (ConvI2L (LoadUS mem)));
8026   predicate(!needs_acquiring_load(n->in(1)));
8027 
8028   ins_cost(4 * INSN_COST);
8029   format %{ "ldrh  $dst, $mem\t# short" %}
8030 
8031   ins_encode(aarch64_enc_ldrh(dst, mem));
8032 
8033   ins_pipe(iload_reg_mem);
8034 %}
8035 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long (sign-extending ldrsw)
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// ldrw zero-extends, so the AndL with the 32-bit mask is folded away.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8077 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fix: disassembly comment previously said "# int" (copy-paste from
  // loadI) for this 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8091 
// Load Range (array length)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8160 
// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8188 
8189 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8245 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fix: disassembly comment previously said "# NULL ptr" (copy-paste from
  // loadConP0); this instruction loads the pointer constant one.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8259 
// Load Poll Page Constant

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card table base, used by GC barriers)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8329 
// Load Packed Float Constant
// (immFPacked: float expressible as an 8-bit FMOV immediate)

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case, via constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
8360 
// Load Packed Double Constant
// (immDPacked: double expressible as an 8-bit FMOV immediate)

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8373 
// Load Double Constant (general case, via constant table)

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fix: disassembly comment previously said "float=$con" (copy-paste from
  // loadConF) for this double constant.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8390 
// Store Instructions

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8435 
8436 
8437 instruct storeimmB0(immI0 zero, memory mem)
8438 %{
8439   match(Set mem (StoreB mem zero));
8440   predicate(!needs_releasing_store(n));
8441 
8442   ins_cost(INSN_COST);
8443   format %{ "strb rscractch2, $mem\t# byte" %}
8444 
8445   ins_encode(aarch64_enc_strb0(mem));
8446 
8447   ins_pipe(istore_mem);
8448 %}
8449 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8504 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fix: disassembly comment previously said "# int" (copy-paste from
  // storeI) for this 64-bit store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8518 
// Store Long (64 bit signed) zero
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fix: disassembly comment previously said "# int" for this 64-bit store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8532 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer: when both narrow bases are zero, rheapbase
// holds zero, so storing it writes a compressed NULL without needing an
// immediate.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8589 
// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // Note: predicate precedes match here, unlike sibling instructs; order is
  // not significant to ADLC.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked
8637 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8651 
8652 //  ---------------- volatile loads and stores ----------------
8653 
// Load Byte (8 bit signed)
// Volatile form: uses load-acquire (ldarsb), so no trailing barrier is needed.
// The indirect operand restricts addressing to a bare base register.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8666 
// Load Byte (8 bit signed) into long
// Fuses the ConvI2L: ldarsb sign-extends to the full register, so the
// conversion costs nothing extra.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8679 
// Load Byte (8 bit unsigned)
// Volatile form: load-acquire byte with zero extension (ldarb).
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8692 
// Load Byte (8 bit unsigned) into long
// Fuses the ConvI2L: ldarb zero-extends, making the int->long conversion free.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8705 
// Load Short (16 bit signed)
// Volatile form: sign-extending load-acquire halfword to a 32-bit register.
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
8718 
// Load Short/Char (16 bit unsigned)
// Volatile form: zero-extending load-acquire halfword.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
8730 
// Load Short/Char (16 bit unsigned) into long
// Fuses the ConvI2L: ldarh zero-extends to the full register.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8743 
// Load Short/Char (16 bit signed) into long
// Fuses the ConvI2L: ldarsh sign-extends to the full register.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Format fixed to "ldarsh": the encoding emits the sign-extending
  // load-acquire halfword, not the zero-extending ldarh.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8756 
// Load Integer (32 bit signed)
// Volatile form: 32-bit load-acquire.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8769 
// Load Integer (32 bit unsigned) into long
// Matches the (AndL (ConvI2L (LoadI ..)) 0xFFFFFFFF) idiom; ldarw already
// zero-extends, so the mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8782 
// Load Long (64 bit signed)
// Volatile form: 64-bit load-acquire.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Format comment fixed from "# int" to "# long" (this is a 64-bit load).
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8795 
// Load Pointer
// Volatile form: 64-bit load-acquire of an uncompressed oop/pointer.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8808 
// Load Compressed Pointer
// Volatile form: 32-bit load-acquire of a narrow oop.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8821 
// Load Float
// Volatile form: acquiring load into an FP register (see enc_fldars).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
8834 
// Load Double
// Volatile form: acquiring load into an FP register (see enc_fldard).
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8847 
// Store Byte
// Volatile form: store-release byte (stlrb); no trailing barrier needed.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
8860 
// Store Char/Short
// Volatile form: store-release halfword (stlrh).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8873 
// Store Integer
// Volatile form: 32-bit store-release (stlrw).

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Spacing fixed to "Set mem (StoreI ...)" for consistency with all
  // sibling rules (whitespace only; ADL semantics are unchanged).
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8887 
// Store Long (64 bit signed)
// Volatile form: 64-bit store-release (stlr).
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Format comment fixed from "# int" to "# long" (this is a 64-bit store).
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8900 
// Store Pointer
// Volatile form: 64-bit store-release of an uncompressed oop/pointer.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8913 
// Store Compressed Pointer
// Volatile form: 32-bit store-release of a narrow oop.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8926 
// Store Float
// Volatile form: releasing FP store (see enc_fstlrs).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8939 
8940 // TODO
8941 // implement storeImmF0 and storeFImmPacked
8942 
// Store Double
// Volatile form: releasing FP store (see enc_fstlrd).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8955 
8956 //  ---------------- end of volatile loads and stores ----------------
8957 
8958 // ============================================================================
8959 // BSWAP Instructions
8960 
// Reverse the byte order of a 32-bit value with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8973 
// Reverse the byte order of a 64-bit value with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8986 
// Reverse bytes of an unsigned short: rev16w swaps the bytes within each
// 16-bit halfword; no extension is needed for the unsigned case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8999 
// Reverse bytes of a signed short: rev16w swaps the bytes, then
// sbfmw #0,#15 sign-extends the low 16 bits into a proper int.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
9014 
9015 // ============================================================================
9016 // Zero Count Instructions
9017 
// Count leading zeros of a 32-bit value with a single clzw.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9029 
// Count leading zeros of a 64-bit value with a single clz.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9041 
// Count trailing zeros (32-bit): reverse the bits, then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9055 
// Count trailing zeros (64-bit): reverse the bits, then count leading zeros.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9069 
9070 //---------- Population Count Instructions -------------------------------------
9071 //
9072 
// Population count of a 32-bit value via the SIMD cnt instruction:
// zero-extend src, move it to a vector register, count set bits per byte
// (cnt 8B), sum the bytes (addv), then move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src in place without a USE_KILL/TEMP effect
    // on src — presumably safe because only the (meaningless) upper 32 bits
    // of the int register change; confirm against the register allocator.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9094 
// Population count of an int loaded from memory: ldrs loads the 32-bit value
// straight into an FP/SIMD register, avoiding a GPR->FPR transfer, then the
// cnt/addv sequence sums the set bits.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9116 
// Note: Long.bitCount(long) returns an int.
// Population count of a 64-bit value via SIMD cnt/addv, same scheme as
// popCountI but without the zero-extension step.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9137 
// Population count of a long loaded from memory: ldrd loads the 64-bit value
// straight into an FP/SIMD register, then cnt/addv sums the set bits.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9159 
9160 // ============================================================================
9161 // MemBar Instruction
9162 
// LoadFence: orders earlier loads before later loads and stores
// (membar LoadLoad|LoadStore).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
9174 
// Elided MemBarAcquire: when the predicate unnecessary_acquire(n) holds,
// no barrier is emitted (cost 0) — only a block comment for disassembly.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
9188 
// MemBarAcquire fallback (no predicate): emits LoadLoad|LoadStore.
// The elided rule above takes priority when its predicate holds.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
9202 
9203 
// MemBarAcquireLock: emits no barrier instruction, only a block comment —
// the lock acquisition itself is expected to provide the ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9216 
// StoreFence: orders earlier loads and stores before later stores
// (membar LoadStore|StoreStore).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9228 
// Elided MemBarRelease: when unnecessary_release(n) holds, no barrier is
// emitted (cost 0) — only a block comment for disassembly.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
9241 
// MemBarRelease fallback (no predicate): emits LoadStore|StoreStore.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9254 
// MemBarStoreStore: orders earlier stores before later stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9266 
// MemBarReleaseLock: emits no barrier instruction, only a block comment —
// the lock release itself is expected to provide the ordering.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9279 
// Elided MemBarVolatile: when unnecessary_volatile(n) holds, no barrier is
// emitted (cost 0) — only a block comment for disassembly.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9293 
// MemBarVolatile fallback: emits a StoreLoad barrier (the expensive one —
// note the cost is VOLATILE_REF_COST*100 to strongly discourage it when
// the elided rule applies).
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9307 
9308 // ============================================================================
9309 // Cast/Convert Instructions
9310 
// long -> pointer reinterpretation: a plain register move, elided entirely
// when the allocator assigns the same register to src and dst.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9325 
// pointer -> long reinterpretation: a plain register move, elided entirely
// when the allocator assigns the same register to src and dst.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9340 
// Convert oop into int for vectors alignment masking
// Truncating 64 -> 32 bit move (movw keeps only the low 32 bits).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9353 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // With a zero narrow-oop shift the compressed bits equal the low 32
  // address bits, so a 32-bit register move is sufficient.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Debug format fixed: was "mov dst, $src" — missing the '$' on dst and
  // showing "mov" although the encoding emits movw.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9369 
9370 
// Convert oop pointer into compressed form
// General (possibly-null) encode; flags are declared killed (effect KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9385 
// Encode an oop known to be non-null (cheaper: no null check path).
// NOTE(review): cr is declared as an operand but there is no effect(KILL cr)
// here — confirm encode_heap_oop_not_null really leaves the flags intact.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
9396 
// Decode a narrow oop that may be null (excludes NotNull and Constant types,
// which take the cheaper rule below).
// NOTE(review): cr is declared but there is no effect(KILL cr) — confirm
// decode_heap_oop does not clobber flags.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9410 
// Decode a narrow oop known to be non-null (or a constant): skips the
// null-check path of the general decode.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9424 
9425 // n.b. AArch64 implementations of encode_klass_not_null and
9426 // decode_klass_not_null do not modify the flags register so, unlike
9427 // Intel, we don't kill CR as a side effect here
9428 
// Compress a klass pointer; per the note above, the AArch64 implementation
// does not modify flags, so no KILL cr effect is declared.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
9443 
// Decompress a klass pointer; no KILL cr needed (see note above). Uses the
// single-register MacroAssembler form when decoding in place (dst == src).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // In-place decode: single-register variant.
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9462 
// CheckCastPP: exists only for the type system — size(0), no code emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9472 
// CastPP: type-system-only node — size(0), no code emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9482 
// CastII: type-system-only node — size(0), cost 0, no code emitted.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9493 
9494 // ============================================================================
9495 // Atomic operation instructions
9496 //
9497 // Intel and SPARC both implement Ideal Node LoadPLocked and
9498 // Store{PIL}Conditional instructions using a normal load for the
9499 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9500 //
9501 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9502 // pair to lock object allocations from Eden space when not using
9503 // TLABs.
9504 //
9505 // There does not appear to be a Load{IL}Locked Ideal Node and the
9506 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9507 // and to use StoreIConditional only for 32-bit and StoreLConditional
9508 // only for 64-bit.
9509 //
9510 // We implement LoadPLocked and StorePLocked instructions using,
9511 // respectively the AArch64 hw load-exclusive and store-conditional
9512 // instructions. Whereas we must implement each of
9513 // Store{IL}Conditional using a CAS which employs a pair of
9514 // instructions comprising a load-exclusive followed by a
9515 // store-conditional.
9516 
9517 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64
// ldaxr = load-exclusive with acquire; pairs with storePConditional's stlxr.

instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9534 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  // "\n\t" added between the two format strings so the debug listing prints
  // them on separate lines instead of running them together.
  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9559 
9560 
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // "\n\t" added between the format strings for readable debug output.
  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9580 
// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // "\n\t" added between the format strings for readable debug output.
  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9599 
9600 // standard CompareAndSwapX when we are using barriers
9601 // these have higher priority than the rules selected by a predicate
9602 
9603 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9604 // can't match them
9605 
// Byte CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  // "\n\t" added between the format strings for readable debug output.
  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9623 
// Short CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  // "\n\t" added between the format strings for readable debug output.
  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9641 
// Int CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  // "\n\t" added between the format strings; indentation normalized.
  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9659 
// Long CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  // "\n\t" added between the format strings; indentation normalized.
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9677 
// Pointer CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  // "\n\t" added between the format strings; indentation normalized.
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9695 
// Narrow-oop CAS with full barriers; res <- 1 on success, 0 on failure.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  // "\n\t" added between the format strings; indentation normalized.
  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9713 
9714 // alternative CompareAndSwapX when we are eliding barriers
9715 
// Acquiring variant of compareAndSwapI, selected when
// needs_acquiring_load_exclusive(n) holds (barriers are being elided);
// note the lower cost (VOLATILE_REF_COST) so it wins over the plain rule.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9734 
// Acquiring variant of compareAndSwapL, selected when
// needs_acquiring_load_exclusive(n) holds; lower cost so it is preferred.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9753 
// Acquiring variant of compareAndSwapP, selected when
// needs_acquiring_load_exclusive(n) holds; lower cost so it is preferred.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9772 
// Acquiring variant of compareAndSwapN, selected when
// needs_acquiring_load_exclusive(n) holds; lower cost so it is preferred.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9791 
9792 
9793 // ---------------------------------------------------------------------
9794 
9795 
9796 // BEGIN This section of the file is automatically generated. Do not edit --------------
9797 
9798 // Sundry CAS operations.  Note that release is always true,
9799 // regardless of the memory ordering of the CAS.  This is because we
9800 // need the volatile case to be sequentially consistent but there is
9801 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9802 // can't check the type of memory ordering here, so we always emit a
9803 // STLXR.
9804 
9805 // This section is generated from aarch64_ad_cas.m4
9806 
9807 
9808 
// Strong compare-and-exchange of a byte: $res receives the value
// previously at [$mem] (sign-extended); $oldval is zero-extended into
// rscratch2 for the comparison.  TEMP_DEF res keeps the result from
// aliasing the inputs; flags are clobbered.
// NOTE(review): format previously said "(byte, weak)" although the
// encode passes /*weak*/ false; fixed here -- the same fix belongs in
// the aarch64_ad_cas.m4 generator source.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9825 
// Strong compare-and-exchange of a short: $res receives the value
// previously at [$mem] (sign-extended); $oldval is zero-extended into
// rscratch2 for the comparison.  TEMP_DEF res; clobbers flags.
// NOTE(review): format previously said "(short, weak)" although the
// encode passes /*weak*/ false; fixed here and in the m4 source.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9842 
// Strong compare-and-exchange of an int: $res receives the value
// previously at [$mem].  TEMP_DEF res; clobbers flags.
// NOTE(review): format previously said "(int, weak)" although the
// encode passes /*weak*/ false; fixed here and in the m4 source.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9857 
// Strong compare-and-exchange of a long: $res receives the value
// previously at [$mem].  TEMP_DEF res; clobbers flags.
// NOTE(review): format previously said "(long, weak)" although the
// encode passes /*weak*/ false; fixed here and in the m4 source.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9872 
// Strong compare-and-exchange of a narrow oop: $res receives the value
// previously at [$mem].  TEMP_DEF res; clobbers flags.
// NOTE(review): format previously said "(narrow oop, weak)" although
// the encode passes /*weak*/ false; fixed here and in the m4 source.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9887 
// Strong compare-and-exchange of a pointer: $res receives the value
// previously at [$mem].  TEMP_DEF res; clobbers flags.
// NOTE(review): format previously said "(ptr, weak)" although the
// encode passes /*weak*/ false; fixed here and in the m4 source.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9902 
// Weak CAS of a byte (may fail spuriously): $res <-- 1 on success
// (csetw on EQ), else 0.  $oldval is zero-extended into rscratch2 for
// the comparison; the loaded value is discarded (noreg).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9920 
// Weak CAS of a short (may fail spuriously): $res <-- 1 on success,
// else 0.  $oldval is zero-extended into rscratch2; loaded value
// discarded (noreg).
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9938 
// Weak CAS of an int (may fail spuriously): $res <-- 1 on success,
// else 0.  Loaded value discarded (noreg); clobbers flags.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9955 
// Weak CAS of a long (may fail spuriously): $res (int) <-- 1 on
// success, else 0.  Loaded value discarded (noreg); clobbers flags.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9972 
// Weak CAS of a narrow oop (may fail spuriously): $res <-- 1 on
// success, else 0.  Loaded value discarded (noreg); clobbers flags.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9989 
// Weak CAS of a pointer (may fail spuriously): $res <-- 1 on success,
// else 0.  Loaded value discarded (noreg); clobbers flags.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
10006 
10007 // END This section of the file is automatically generated. Do not edit --------------
10008 // ---------------------------------------------------------------------
10009 
// Atomic exchange of an int: stores $newv at [$mem], old value in $prev.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10018 
// Atomic exchange of a long: stores $newv at [$mem], old value in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10027 
// Atomic exchange of a narrow oop (word-sized xchg).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10036 
// Atomic exchange of a pointer: stores $newv at [$mem], old value in $prev.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10045 
10046 
// Atomic add of register $incr to the long at [$mem]; the GetAndAddL
// node's result is left in $newval.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10056 
// Variant matched when the GetAndAddL result is unused
// (result_not_used()): passes noreg and costs slightly less (9 vs 10)
// so it is preferred over get_and_addL.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10067 
// Atomic add of an add/sub-encodable immediate to the long at [$mem];
// result in $newval.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10077 
// Immediate-increment variant matched when the GetAndAddL result is
// unused; slightly cheaper (9 vs 10) so it is preferred.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10088 
// Atomic add of register $incr to the int at [$mem] (word-sized
// atomic_addw); result in $newval.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10098 
// Variant matched when the GetAndAddI result is unused; slightly
// cheaper (9 vs 10) so it is preferred over get_and_addI.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10109 
// Atomic add of an add/sub-encodable immediate to the int at [$mem];
// result in $newval.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10119 
// Immediate-increment variant matched when the GetAndAddI result is
// unused; slightly cheaper (9 vs 10) so it is preferred.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10130 
10131 // Manifest a CmpL result in an integer register.
10132 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way long compare, register/register form:
//   cmp sets flags; csetw NE gives 0 when equal, 1 otherwise;
//   cnegw LT negates that 1 to -1 when src1 < src2.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10153 
// Three-way long compare against an add/sub immediate.  A negative
// constant is handled by adding its negation so the flags come out the
// same as subs with the original value; then csetw/cnegw manifest
// -1/0/1 as in cmpL3_reg_reg.
// NOTE(review): assumes immLAddSub excludes INT_MIN so -con cannot
// overflow -- confirm against the operand definition.
// (Also fixed the stray one-space indent on the `if` line.)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10178 
10179 // ============================================================================
10180 // Conditional Move Instructions
10181 
10182 // n.b. we have identical rules for both a signed compare op (cmpOp)
10183 // and an unsigned compare op (cmpOpU). it would be nice if we could
10184 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic Machoper::negate method
10189 // which throws a ShouldNotHappen. So, we have to provide two flavours
10190 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10191 
// CMoveI, signed compare: $dst <-- $cmp ? $src2 : $src1 (one cselw).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10207 
// CMoveI, unsigned compare: $dst <-- $cmp ? $src2 : $src1 (one cselw).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10223 
10224 // special cases where one arg is zero
10225 
10226 // n.b. this is selected in preference to the rule above because it
10227 // avoids loading constant 0 into a source register
10228 
10229 // TODO
10230 // we ought only to be able to cull one of these variants as the ideal
10231 // transforms ought always to order the zero consistently (to left/right?)
10232 
// CMoveI, signed, zero first input: $dst <-- $cmp ? $src : 0,
// using zr instead of materialising the constant.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10248 
// CMoveI, unsigned, zero first input: $dst <-- $cmp ? $src : 0 (zr).
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10264 
// CMoveI, signed, zero second input: $dst <-- $cmp ? 0 : $src (zr).
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10280 
// CMoveI, unsigned, zero second input: $dst <-- $cmp ? 0 : $src (zr).
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10296 
10297 // special case for creating a boolean 0 or 1
10298 
10299 // n.b. this is selected in preference to the rule above because it
10300 // avoids loading constants 0 and 1 into a source register
10301 
// CMoveI selecting between constants 1 and 0, signed compare:
// $dst <-- $cmp ? 0 : 1 with a single csincw(zr, zr) -- no source
// registers needed (see the commented-out cset equivalent below).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10320 
// CMoveI selecting between constants 1 and 0, unsigned compare:
// $dst <-- $cmp ? 0 : 1 with a single csincw(zr, zr).
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10339 
// CMoveL, signed compare: $dst <-- $cmp ? $src2 : $src1 (one csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10355 
// CMoveL, unsigned compare: $dst <-- $cmp ? $src2 : $src1 (one csel).
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10371 
10372 // special cases where one arg is zero
10373 
// CMoveL, signed, zero second input: $dst <-- $cmp ? 0 : $src (zr).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10389 
// CMoveL, unsigned, zero second input: $dst <-- $cmp ? 0 : $src (zr).
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10405 
// CMoveL, signed, zero first input: $dst <-- $cmp ? $src : 0 (zr).
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10421 
// CMoveL, unsigned, zero first input: $dst <-- $cmp ? $src : 0 (zr).
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10437 
// CMoveP, signed compare: $dst <-- $cmp ? $src2 : $src1 (one csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10453 
// CMoveP, unsigned compare: $dst <-- $cmp ? $src2 : $src1 (one csel).
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10469 
10470 // special cases where one arg is zero
10471 
// CMoveP, signed, null second input: $dst <-- $cmp ? 0 : $src (zr).
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10487 
// CMoveP, unsigned, null second input: $dst <-- $cmp ? 0 : $src (zr).
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10503 
// CMoveP, signed, null first input: $dst <-- $cmp ? $src : 0 (zr).
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10519 
// CMoveP, unsigned, null first input: $dst <-- $cmp ? $src : 0 (zr).
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10535 
// CMoveN (compressed ptr), signed compare:
// $dst <-- $cmp ? $src2 : $src1 (one cselw).
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10551 
// CMoveN (compressed ptr), unsigned compare:
// $dst <-- $cmp ? $src2 : $src1 (one cselw).
// NOTE(review): format previously said "# signed" although this is the
// cmpOpU/rFlagsRegU rule; corrected to match the other unsigned rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10567 
10568 // special cases where one arg is zero
10569 
// Conditional move of a compressed pointer where the true value is 0:
// dst = (cmp cr) ? 0 : src, with zr supplying the zero.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10585 
// Unsigned-comparison variant of cmovN_reg_zero: dst = (cmpU cr) ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10601 
// Conditional move of a compressed pointer where the false value is 0:
// dst = (cmp cr) ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10617 
// Unsigned-comparison variant of cmovN_zero_reg: dst = (cmpU cr) ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10633 
// Conditional move of a float: dst = (cmp cr) ? src2 : src1, via fcsel (S form).
// Fix: the format string listed the operands as $src1, $src2 although the
// emitted instruction takes src2 first (matching the integer cmov rules);
// also dropped a stray trailing "\n\t".
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src2, $src1, $cmp\t# signed cmove float" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10651 
// Unsigned-comparison variant of cmovF_reg: dst = (cmpU cr) ? src2 : src1.
// Fix: format operand order now matches the emitted fcsels (src2 first);
// dropped a stray trailing "\n\t".
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src2, $src1, $cmp\t# unsigned cmove float" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10669 
// Conditional move of a double: dst = (cmp cr) ? src2 : src1, via fcsel (D form).
// Fixes: format operand order now matches the emitted fcseld (src2 first);
// the comment said "float" for what is a double cmove; dropped a stray "\n\t".
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src2, $src1, $cmp\t# signed cmove double" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10687 
// Unsigned-comparison variant of cmovD_reg: dst = (cmpU cr) ? src2 : src1.
// Fixes: format operand order now matches the emitted fcseld (src2 first);
// the comment said "float" for what is a double cmove; dropped a stray "\n\t".
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src2, $src1, $cmp\t# unsigned cmove double" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10705 
10706 // ============================================================================
10707 // Arithmetic Instructions
10708 //
10709 
10710 // Integer Addition
10711 
10712 // TODO
10713 // these currently employ operations which do not set CR and hence are
10714 // not flagged as killing CR but we would like to isolate the cases
10715 // where we want to set flags from those where we don't. need to work
10716 // out how to do that.
10717 
// 32-bit integer add, register + register: dst = src1 + src2 (addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10732 
// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10746 
// 32-bit add of an immediate to the low word of a long (ConvL2I folded away:
// the w-form instruction only reads the low 32 bits of src1 anyway).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10760 
10761 // Pointer Addition
// Pointer add: dst = src1 + src2 (64-bit add).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10776 
// Pointer add with a sign-extended 32-bit offset: the ConvI2L is folded into
// the add's sxtw extend, saving a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10791 
// Pointer add with a shifted index: dst = src1 + (src2 << scale),
// emitted via lea with a scaled register-offset address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10806 
// Pointer add with a sign-extended and scaled int index:
// dst = src1 + ((long)src2 << scale), folded into one lea with sxtw extend.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10821 
// (long)src << scale folded into a single sbfiz (sign-extend + shift in one
// bitfield-insert). The width is capped at 32 because only the low 32 bits of
// src are meaningful after ConvI2L.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10836 
10837 // Pointer Immediate Addition
10838 // n.b. this needs to be more expensive than using an indirect memory
10839 // operand
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10853 
10854 // Long Addition
// 64-bit long add, register + register: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10870 
// Long Immediate Addition. No constant pool entries required.
// 64-bit long add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10885 
10886 // Integer Subtraction
// 32-bit integer subtract, register - register: dst = src1 - src2 (subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10901 
10902 // Immediate Subtraction
// 32-bit integer subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10916 
10917 // Long Subtraction
// 64-bit long subtract, register - register: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10933 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit long subtract, register - add/sub-encodable immediate.
// Fix: the format string was "sub$dst" (mnemonic fused to the operand);
// it now prints "sub  $dst, ..." like the other sub rules.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10948 
10949 // Integer Negation (special case for sub)
10950 
// 32-bit negate: 0 - src matched to a single negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10964 
10965 // Long Negation
10966 
// 64-bit negate: 0 - src matched to a single neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10980 
10981 // Integer Multiply
10982 
// 32-bit integer multiply: dst = src1 * src2 (mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10997 
// Widening multiply: (long)src1 * (long)src2 of two ints collapses to a
// single smull (signed 32x32 -> 64), avoiding two explicit extends.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
11012 
11013 // Long Multiply
11014 
// 64-bit long multiply: dst = src1 * src2.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11029 
// High half of a signed 64x64 multiply (MulHiL) via smulh.
// Fix: removed the stray ", \t" from the format string so the disassembly
// comment is attached cleanly.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11045 
11046 // Combined Integer Multiply & Add/Sub
11047 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2 (maddw).
// Fix: the format string said "madd" although the emitted instruction is the
// 32-bit maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11063 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2 (msubw).
// Fix: the format string said "msub" although the emitted instruction is the
// 32-bit msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11079 
11080 // Combined Long Multiply & Add/Sub
11081 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2 (madd).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11097 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 (msub).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11113 
11114 // Integer Divide
11115 
// 32-bit signed integer divide; body lives in the aarch64_enc_divw encoding.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11125 
// (src >> 31) >>> 31 extracts the sign bit as 0/1 with a single lsrw
// (the inner arithmetic shift is redundant once both shifts are 31).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
11135 
// src + (sign bit of src): the rounding adjustment used before a
// divide-by-power-of-two, folded into one shifted addw (src + (src >>> 31)).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
11149 
11150 // Long Divide
11151 
// 64-bit signed long divide; body lives in the aarch64_enc_div encoding.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11161 
// (src >> 63) >>> 63 extracts the long's sign bit as 0/1 with a single lsr.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11171 
// src + (sign bit of src) for longs: the rounding adjustment before a
// divide-by-power-of-two, folded into one shifted add (src + (src >>> 63)).
// Fix: the format string omitted the LSR shift, unlike its 32-bit twin
// div2Round; it now prints the shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11185 
11186 // Integer Remainder
11187 
// 32-bit signed remainder: sdivw into rscratch1, then msubw reconstructs
// src1 - (src1/src2)*src2. Body lives in the aarch64_enc_modw encoding.
// Fix: the format string had an unbalanced "msubw(" — now plain operands.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11198 
11199 // Long Remainder
11200 
// 64-bit signed remainder: sdiv into rscratch1, then msub reconstructs
// src1 - (src1/src2)*src2. Body lives in the aarch64_enc_mod encoding.
// Fixes: unbalanced "msub(" in the format string, and "\n" -> "\n\t" so the
// second line is indented like modI's.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11211 
11212 // Integer Shifts
11213 
11214 // Shift Left Register
// 32-bit shift left by a register amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11229 
11230 // Shift Left Immediate
// 32-bit shift left by an immediate; the count is masked to 0-31 to match
// Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11245 
11246 // Shift Right Logical Register
// 32-bit logical shift right by a register amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11261 
11262 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 0-31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11277 
11278 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register amount (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11293 
11294 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 0-31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11309 
11310 // Combined Int Mask and Right Shift (using UBFM)
11311 // TODO
11312 
11313 // Long Shifts
11314 
11315 // Shift Left Register
// 64-bit shift left by a register amount (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11330 
11331 // Shift Left Immediate
// 64-bit shift left by an immediate; count masked to 0-63.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11346 
11347 // Shift Right Logical Register
// 64-bit logical shift right by a register amount (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11362 
11363 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 0-63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11378 
11379 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: logical right shift of a
// pointer reinterpreted as a long (CastP2X), e.g. addr >>> card_shift.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11394 
11395 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register amount (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11410 
11411 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 0-63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11426 
11427 // BEGIN This section of the file is automatically generated. Do not edit --------------
11428 
// Bitwise NOT of a long: src1 ^ -1 matched to eon with zr (~(src1 ^ 0)).
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Bitwise NOT of an int: src1 ^ -1 matched to eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11461 
// src1 & ~src2 (int) folded into a single bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11478 
// src1 & ~src2 (long) folded into a single bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11495 
// src1 | ~src2 (int) folded into a single ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11512 
// src1 | ~src2 (long) folded into a single orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11529 
// ~(src1 ^ src2) (int) — matched as -1 ^ (src2 ^ src1) — folded into eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11546 
// ~(src1 ^ src2) (long) — matched as -1 ^ (src2 ^ src1) — folded into eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11563 
// src1 & ~(src2 >>> src3) (int) folded into bicw with an LSR shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11581 
11582 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11583                          iRegL src1, iRegL src2,
11584                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11585   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11586   ins_cost(1.9 * INSN_COST);
11587   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11588 
11589   ins_encode %{
11590     __ bic(as_Register($dst$$reg),
11591               as_Register($src1$$reg),
11592               as_Register($src2$$reg),
11593               Assembler::LSR,
11594               $src3$$constant & 0x3f);
11595   %}
11596 
11597   ins_pipe(ialu_reg_reg_shift);
11598 %}
11599 
11600 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11601                          iRegIorL2I src1, iRegIorL2I src2,
11602                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11603   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11604   ins_cost(1.9 * INSN_COST);
11605   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11606 
11607   ins_encode %{
11608     __ bicw(as_Register($dst$$reg),
11609               as_Register($src1$$reg),
11610               as_Register($src2$$reg),
11611               Assembler::ASR,
11612               $src3$$constant & 0x1f);
11613   %}
11614 
11615   ins_pipe(ialu_reg_reg_shift);
11616 %}
11617 
11618 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11619                          iRegL src1, iRegL src2,
11620                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11621   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11622   ins_cost(1.9 * INSN_COST);
11623   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11624 
11625   ins_encode %{
11626     __ bic(as_Register($dst$$reg),
11627               as_Register($src1$$reg),
11628               as_Register($src2$$reg),
11629               Assembler::ASR,
11630               $src3$$constant & 0x3f);
11631   %}
11632 
11633   ins_pipe(ialu_reg_reg_shift);
11634 %}
11635 
11636 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11637                          iRegIorL2I src1, iRegIorL2I src2,
11638                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11639   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11640   ins_cost(1.9 * INSN_COST);
11641   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11642 
11643   ins_encode %{
11644     __ bicw(as_Register($dst$$reg),
11645               as_Register($src1$$reg),
11646               as_Register($src2$$reg),
11647               Assembler::LSL,
11648               $src3$$constant & 0x1f);
11649   %}
11650 
11651   ins_pipe(ialu_reg_reg_shift);
11652 %}
11653 
11654 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11655                          iRegL src1, iRegL src2,
11656                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11657   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11658   ins_cost(1.9 * INSN_COST);
11659   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11660 
11661   ins_encode %{
11662     __ bic(as_Register($dst$$reg),
11663               as_Register($src1$$reg),
11664               as_Register($src2$$reg),
11665               Assembler::LSL,
11666               $src3$$constant & 0x3f);
11667   %}
11668 
11669   ins_pipe(ialu_reg_reg_shift);
11670 %}
11671 
// XOR-NOT with shifted operand: the ideal tree is
// -1 ^ ((src2 shift src3) ^ src1), which simplifies to
// dst = src1 ^ ~(src2 shift src3). EON's shifted-register form does the
// shift, the complement and the XOR in a single instruction.
// Shift counts are masked (& 0x1f / & 0x3f) per Java shift semantics.

// dst = src1 ^ ~(src2 >>> src3) (32-bit, logical shift right)
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 >>> src3) (64-bit, logical shift right)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 >> src3) (32-bit, arithmetic shift right)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 >> src3) (64-bit, arithmetic shift right)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 << src3) (32-bit, shift left)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 << src3) (64-bit, shift left)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11779 
// OR with complemented, shifted operand: the ideal tree is
// src1 | ((src2 shift src3) ^ -1), i.e. dst = src1 | ~(src2 shift src3).
// ORN's shifted-register form performs shift + complement + OR in one
// instruction. Shift counts are masked (& 0x1f / & 0x3f) per Java
// shift semantics.

// dst = src1 | ~(src2 >>> src3) (32-bit, logical shift right)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3) (64-bit, logical shift right)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (32-bit, arithmetic shift right)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (64-bit, arithmetic shift right)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (32-bit, shift left)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (64-bit, shift left)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11887 
// AND with shifted second operand: dst = src1 & (src2 shift src3),
// using AND's shifted-register form so the shift costs no extra
// instruction. Shift counts are masked (& 0x1f / & 0x3f) per Java
// shift semantics.

// dst = src1 & (src2 >>> src3) (32-bit, logical shift right)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3) (64-bit, logical shift right)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (32-bit, arithmetic shift right)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (64-bit, arithmetic shift right)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (32-bit, shift left)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (64-bit, shift left)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12001 
// XOR with shifted second operand: dst = src1 ^ (src2 shift src3),
// using EOR's shifted-register form so the shift costs no extra
// instruction. Shift counts are masked (& 0x1f / & 0x3f) per Java
// shift semantics.

// dst = src1 ^ (src2 >>> src3) (32-bit, logical shift right)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3) (64-bit, logical shift right)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (32-bit, arithmetic shift right)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (64-bit, arithmetic shift right)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (32-bit, shift left)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (64-bit, shift left)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12115 
// OR with shifted second operand: dst = src1 | (src2 shift src3),
// using ORR's shifted-register form so the shift costs no extra
// instruction. Shift counts are masked (& 0x1f / & 0x3f) per Java
// shift semantics.

// dst = src1 | (src2 >>> src3) (32-bit, logical shift right)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3) (64-bit, logical shift right)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (32-bit, arithmetic shift right)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (64-bit, arithmetic shift right)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (32-bit, shift left)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (64-bit, shift left)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12229 
// ADD with shifted second operand: dst = src1 + (src2 shift src3),
// using ADD's shifted-register form so the shift costs no extra
// instruction. Shift counts are masked (& 0x1f / & 0x3f) per Java
// shift semantics.

// dst = src1 + (src2 >>> src3) (32-bit, logical shift right)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3) (64-bit, logical shift right)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (32-bit, arithmetic shift right)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (64-bit, arithmetic shift right)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3) (32-bit, shift left)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3) (64-bit, shift left)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12343 
// SUB with shifted second operand: dst = src1 - (src2 shift src3),
// using SUB's shifted-register form so the shift costs no extra
// instruction. Only the second (subtrahend) operand may be shifted.
// Shift counts are masked (& 0x1f / & 0x3f) per Java shift semantics.

// dst = src1 - (src2 >>> src3) (32-bit, logical shift right)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3) (64-bit, logical shift right)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (32-bit, arithmetic shift right)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (64-bit, arithmetic shift right)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3) (32-bit, shift left)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3) (64-bit, shift left)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12457 
12458 
12459 
// Shift Left followed by Shift Right (arithmetic), folded into one
// signed bitfield move: (src << lshift) >> rshift == sbfm dst, src, r, s
// with r = (rshift - lshift) & 63 and s = 63 - lshift.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  // (The unsigned cast also rejects negative shift counts.)
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;           // imms: top bit of the field kept from src
    int r = (rshift - lshift) & 63; // immr: rotate amount (mod 64)
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12482 
// Shift Left followed by Shift Right (arithmetic), 32-bit variant, folded
// into one signed bitfield move: (src << lshift) >> rshift == sbfmw with
// r = (rshift - lshift) & 31 and s = 31 - lshift.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  // (The unsigned cast also rejects negative shift counts.)
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;           // imms: top bit of the field kept from src
    int r = (rshift - lshift) & 31; // immr: rotate amount (mod 32)
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12505 
12506 // Shift Left followed by Shift Right.
12507 // This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: collapse (src << lshift) >>> rshift on a
// long into a single unsigned bitfield move (ubfm).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;            // imms: top bit of the extracted field
    int r = (rshift - lshift) & 63; // immr: right-rotate amount
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12528 
12529 // Shift Left followed by Shift Right.
12530 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: collapse (src << lshift) >>> rshift on an int
// into a single ubfmw.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;            // imms: top bit of the extracted field
    int r = (rshift - lshift) & 31; // immr: right-rotate amount
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12551 // Bitfield extract with shift & mask
12552 
// (src >>> rshift) & mask on an int, where mask is a contiguous low-bit
// mask (guaranteed by immI_bitmask), becomes a single ubfxw extracting
// log2(mask + 1) bits starting at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1); // mask+1 is a power of two by construction
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// (src >>> rshift) & mask on a long, where mask is a contiguous low-bit
// mask (guaranteed by immL_bitmask), becomes a single ubfx extracting
// log2(mask + 1) bits starting at bit rshift.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask is a 64-bit immediate, so mask+1 may not fit in an int; use the
    // long variant (consistent with the exact_log2_long use in ubfizL).
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12583 
12584 // We can use ubfx when extending an And with a mask when we know mask
12585 // is positive.  We know that because immI_bitmask guarantees it.
// Same extraction as ubfxwI but with an outer ConvI2L: since ubfx
// zero-extends into the 64-bit destination, the int-to-long conversion
// comes for free and the whole tree collapses into one instruction.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1); // mask is an int bitmask, fits in int range
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12601 
12602 // We can use ubfiz when masking by a positive number and then left shifting the result.
12603 // We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift on an int becomes a single ubfizw inserting the
// low log2(mask + 1) bits of src at bit position lshift.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  // The inserted field (width + lshift) must fit inside the 32-bit register.
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1); // mask+1 is a power of two by construction
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12621 // We can use ubfiz when masking by a positive number and then left shifting the result.
12622 // We know that the mask is positive because immL_bitmask guarantees it.
// (src & mask) << lshift on a long becomes a single ubfiz inserting the
// low log2(mask + 1) bits of src at bit position lshift.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  // The inserted field (width + lshift) must fit inside the 64-bit register.
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // mask is a 64-bit immediate, so mask+1 may not fit in an int; use the
    // long variant, matching the exact_log2_long call in the predicate above.
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12640 
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
// ((long)(src & mask)) << lshift: the ConvI2L is free because ubfiz
// zero-extends into the 64-bit destination, so the whole tree is one ubfiz.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  // The inserted field (width + lshift) must stay within the low 32 bits.
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1); // mask is an int bitmask, fits in int range
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12659 
12660 // Rotations
12661 
// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64 is a
// 64-bit funnel shift: a single extr extracting from the src1:src2 pair.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Only valid when the two shift counts together cover all 64 bits.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12676 
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 is a single extrw from the src1:src2 pair.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Only valid when the two shift counts together cover all 32 bits.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the mnemonic actually emitted below (extrw, not extr).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12691 
// Same funnel-shift pattern as extrOrL but combined with AddL: when the
// shifted fields are disjoint (counts sum to 64), Add and Or are equivalent.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Only valid when the two shift counts together cover all 64 bits.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12706 
// 32-bit variant of extrAddL: Add of disjoint shifted fields (counts sum
// to 32) is equivalent to Or, so it maps to a single extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Only valid when the two shift counts together cover all 32 bits.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the mnemonic actually emitted below (extrw, not extr).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12721 
12722 
12723 // rol expander
12724 
// Rotate-left of a long by a variable amount, synthesized as a
// rotate-right by the negated count (rorv dst, src, -shift).
// Clobbers rscratch1; used only via the rolL_rReg_Var_* expand rules below.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg)); // rscratch1 = -shift
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12738 
12739 // rol expander
12740 
// 32-bit variant of rolL_rReg: rotate-left of an int via rorvw by the
// negated count. Clobbers rscratch1; used only via the rolI_rReg_Var_*
// expand rules below.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg)); // rscratch1 = -shift
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12754 
// Rotate-left idiom: (src << shift) | (src >>> (64 - shift)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Rotate-left idiom with 0 - shift: (src << shift) | (src >>> -shift);
// shift counts are taken mod 64, so 0 - shift equals 64 - shift.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom: (src << shift) | (src >>> (32 - shift)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with 0 - shift (counts taken mod 32).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12790 
12791 // ror expander
12792 
// Rotate-right of a long by a variable amount: maps directly to rorv.
// Used only via the rorL_rReg_Var_* expand rules below.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12805 
12806 // ror expander
12807 
// 32-bit variant of rorL_rReg: rotate-right of an int via rorvw.
// Used only via the rorI_rReg_Var_* expand rules below.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12820 
// Rotate-right idiom: (src >>> shift) | (src << (64 - shift)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Rotate-right idiom with 0 - shift: shift counts are taken mod 64,
// so 0 - shift equals 64 - shift.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom: (src >>> shift) | (src << (32 - shift)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with 0 - shift (counts taken mod 32).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12856 
12857 // Add/subtract (extended)
12858 
// long + (long)int: fold the ConvI2L into the add's sxtw operand extension.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// long - (long)int: fold the ConvI2L into the sub's sxtw operand extension.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12884 
12885 
// The following rules fold a shift-left/shift-right sign- or zero-extension
// idiom ((x << k) >> k, matched via the fixed lshift/rshift immediates)
// into the extended-operand form of add, avoiding a separate extend insn.

// int + (short)src2: (src2 << 16) >> 16 is a sxth extension.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + (byte)src2: (src2 << 24) >> 24 is a sxtb extension.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + unsigned-byte src2: (src2 << 24) >>> 24 is a uxtb extension.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (short)src2: (src2 << 48) >> 48 is a sxth extension.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (int)src2: (src2 << 32) >> 32 is a sxtw extension.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (byte)src2: (src2 << 56) >> 56 is a sxtb extension.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + unsigned-byte src2: (src2 << 56) >>> 56 is a uxtb extension.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12976 
12977 
// The following rules fold an And with an all-ones low mask (0xff, 0xffff,
// 0xffffffff) into the zero-extending operand form of add/sub: the mask is
// matched by the immI_255 / immI_65535 / immL_* operand and emitted as a
// uxtb / uxth / uxtw extension of src2. The I-variants use addw/subw.

// int + (src2 & 0xff) -> addw ... uxtb
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + (src2 & 0xffff) -> addw ... uxth
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xff) -> add ... uxtb
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xffff) -> add ... uxth
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xffffffff) -> add ... uxtw
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int - (src2 & 0xff) -> subw ... uxtb
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int - (src2 & 0xffff) -> subw ... uxth
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xff) -> sub ... uxtb
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xffff) -> sub ... uxth
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xffffffff) -> sub ... uxtw
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13107 
13108 
// The following rules fold a sign-extension shift pair ((x << k) >> k)
// followed by a further left shift (immIExt lshift2, limited to the 0..4
// range the extended-register form supports) into a single add/sub with
// a shifted sign-extended operand: add dst, src1, src2, sxtX #lshift2.

// long + ((byte)src2 << lshift2)
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + ((short)src2 << lshift2)
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + ((int)src2 << lshift2)
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((byte)src2 << lshift2)
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((short)src2 << lshift2)
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((int)src2 << lshift2)
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + ((byte)src2 << lshift2) -> addw ... sxtb #lshift2
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + ((short)src2 << lshift2) -> addw ... sxth #lshift2
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int - ((byte)src2 << lshift2) -> subw ... sxtb #lshift2
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int - ((short)src2 << lshift2) -> subw ... sxth #lshift2
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13238 
13239 
// long + ((long)int << lshift): fold the ConvI2L and the shift into a
// single add with a shifted sxtw operand.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// long - ((long)int << lshift): same fold for subtraction.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
13265 
13266 
// The following rules fold (src2 & low-mask) << lshift into a single
// add/sub with a shifted zero-extended operand (uxtb/uxth/uxtw #lshift);
// immIExt limits lshift to the range the extended-register form supports.

// long + ((src2 & 0xff) << lshift)
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + ((src2 & 0xffff) << lshift)
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long + ((src2 & 0xffffffff) << lshift)
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((src2 & 0xff) << lshift)
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((src2 & 0xffff) << lshift)
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long - ((src2 & 0xffffffff) << lshift)
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + ((src2 & 0xff) << lshift) -> addw ... uxtb #lshift
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int + ((src2 & 0xffff) << lshift) -> addw ... uxth #lshift
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13370 
13371 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
13372 %{
13373   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13374   ins_cost(1.9 * INSN_COST);
13375   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
13376 
13377    ins_encode %{
13378      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13379             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
13380    %}
13381   ins_pipe(ialu_reg_reg_shift);
13382 %}
13383 
13384 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
13385 %{
13386   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13387   ins_cost(1.9 * INSN_COST);
13388   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
13389 
13390    ins_encode %{
13391      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13392             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
13393    %}
13394   ins_pipe(ialu_reg_reg_shift);
13395 %}
13396 // END This section of the file is automatically generated. Do not edit --------------
13397 
13398 // ============================================================================
13399 // Floating Point Arithmetic Instructions
13400 
// Single-precision FP add: dst = src1 + src2 (fadds).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: dst = src1 + src2 (faddd).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: dst = src1 - src2 (fsubs).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: dst = src1 - src2 (fsubd).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply: dst = src1 * src2 (fmuls).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: dst = src1 * src2 (fmuld).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13490 
// Fused multiply-add family. All rules are guarded by predicate(UseFMA),
// i.e. they only fire when the FmaF/FmaD ideal nodes are being generated.

// src1 * src2 + src3  (fmadds)
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3  (fmaddd)
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3  (fmsubs)
// Two match rules cover both placements of the negation, since
// (-a)*b == a*(-b).
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3  (fmsubd)
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3  (fnmadds)
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3  (fnmaddd)
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13596 
// src1 * src2 - src3  (fnmsubs)
// NOTE(review): the immF0/immD0 "zero" operands below are never referenced in
// the match rules or encodings — apparently dead; confirm whether adlc or a
// later cleanup depends on them before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3  (fnmsub, the double-precision form)
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13631 
13632 
// Single-precision FP divide: dst = src1 / src2 (fdivs).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: dst = src1 / src2 (fdivd).
// Costed higher than divF to reflect the longer-latency double divide.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13662 
// Single-precision FP negate: dst = -src (fnegs).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed format text: the emitted instruction is fnegs (single-precision),
  // matching the "fnegd" text used by negD_reg_reg below.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13676 
// Double-precision FP negate: dst = -src (fnegd).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision FP absolute value: dst = |src| (fabss).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value: dst = |src| (fabsd).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13716 
// Double-precision square root: dst = sqrt(src) (fsqrtd).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed pipe class: this is a double-precision op; it previously used
  // fp_div_s (swapped with sqrtF_reg's fp_div_d).
  ins_pipe(fp_div_d);
%}
13729 
// Single-precision square root (fsqrts). There is no SqrtF ideal node in
// this scheme, so the matcher recognizes the double round-trip
// (float)sqrt((double)src) and strength-reduces it to one fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed pipe class: this is a single-precision op; it previously used
  // fp_div_d (swapped with sqrtD_reg's fp_div_s).
  ins_pipe(fp_div_s);
%}
13742 
13743 // ============================================================================
13744 // Logical Instructions
13745 
13746 // Integer Logical Instructions
13747 
13748 // And Instructions
13749 
13750 
// and(int): dst = src1 & src2 (andw, 32-bit).
// NOTE(review): cr is declared but never mentioned in match/effect/encode —
// presumably leftover; confirm whether another rule relies on it.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13765 
// and(int) with a logical immediate: dst = src1 & src2 (andw, 32-bit).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed format text: the encoder emits andw (non-flag-setting), not the
  // flag-setting andsw the format previously claimed.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13780 
13781 // Or Instructions
13782 
// or(int): dst = src1 | src2 (orrw, 32-bit).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// or(int) with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// xor(int): dst = src1 ^ src2 (eorw, 32-bit).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// xor(int) with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13844 
13845 // Long Logical Instructions
13846 // TODO
13847 
// and(long): dst = src1 & src2 (64-bit andr).
// NOTE(review): cr is declared but never mentioned in match/effect/encode.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// and(long) with a logical immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13877 
13878 // Or Instructions
13879 
// or(long): dst = src1 | src2 (64-bit orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// or(long) with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// xor(long): dst = src1 ^ src2 (64-bit eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// xor(long) with a logical immediate.
// (format/ins_cost ordering normalized to match the sibling rules above.)
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13941 
// Sign-extend int to long: sbfm Rd, Rn, 0, 31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: the AndL with the 32-bit all-ones mask after
// ConvI2L is an unsigned extension, emitted as ubfm Rd, Rn, 0, 31 (uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move (movw) keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13980 
// int -> boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero then
// conditional set. Flags are clobbered, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0, 64-bit compare variant.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14016 
// double -> float narrowing conversion (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int: signed convert with round-toward-zero (fcvtzsw).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: signed convert with round-toward-zero (fcvtzs).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float: signed convert (scvtfws).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float: signed convert (scvtfs).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int: signed convert with round-toward-zero (fcvtzdw).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: signed convert with round-toward-zero (fcvtzd).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double: signed convert (scvtfwd).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double: signed convert (scvtfd).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14146 
14147 // stack <-> reg and reg <-> reg shuffles with no conversion
14148 
// Raw-bit shuffles between stack slots and registers: no value conversion,
// just a typed reload/spill of the same 32- or 64-bit pattern.

// Reload a float stack slot into a GP register (raw bits).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reload an int stack slot into an FP register (raw bits).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reload a double stack slot into a GP register (raw bits).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reload a long stack slot into an FP register (raw bits).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill an FP register to an int stack slot (raw bits).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill a GP register to a float stack slot (raw bits).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14256 
// Spill an FP (double) register to a long stack slot (raw bits).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed format operand order: the store writes src to the stack slot dst,
  // as in the sibling *_reg_stack rules ("strs $src, $dst", "strw $src, $dst").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14274 
// Spill a GP (long) register to a double stack slot (raw bits).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// FP -> GP register bit-copy (32-bit fmov).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// GP -> FP register bit-copy (32-bit fmov).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// FP -> GP register bit-copy (64-bit fmov).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// GP -> FP register bit-copy (64-bit fmov).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14364 
14365 // ============================================================================
14366 // clearing of an array
14367 
// Zero cnt words starting at base. Both fixed registers (r10/r11) are
// clobbered by the zero_words helper, hence USE_KILL.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-count variant: only used when the word count is small enough
// (below BlockZeroingLowLimit scaled to words) that inline zeroing wins.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14399 
14400 // ============================================================================
14401 // Overflow Math Instructions
14402 
14403 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14404 %{
14405   match(Set cr (OverflowAddI op1 op2));
14406 
14407   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14408   ins_cost(INSN_COST);
14409   ins_encode %{
14410     __ cmnw($op1$$Register, $op2$$Register);
14411   %}
14412 
14413   ins_pipe(icmp_reg_reg);
14414 %}
14415 
14416 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14417 %{
14418   match(Set cr (OverflowAddI op1 op2));
14419 
14420   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14421   ins_cost(INSN_COST);
14422   ins_encode %{
14423     __ cmnw($op1$$Register, $op2$$constant);
14424   %}
14425 
14426   ins_pipe(icmp_reg_imm);
14427 %}
14428 
14429 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14430 %{
14431   match(Set cr (OverflowAddL op1 op2));
14432 
14433   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14434   ins_cost(INSN_COST);
14435   ins_encode %{
14436     __ cmn($op1$$Register, $op2$$Register);
14437   %}
14438 
14439   ins_pipe(icmp_reg_reg);
14440 %}
14441 
14442 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14443 %{
14444   match(Set cr (OverflowAddL op1 op2));
14445 
14446   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14447   ins_cost(INSN_COST);
14448   ins_encode %{
14449     __ cmn($op1$$Register, $op2$$constant);
14450   %}
14451 
14452   ins_pipe(icmp_reg_imm);
14453 %}
14454 
// Int subtract overflow check, register - register.  cmp sets V on
// signed subtract overflow; the difference itself is discarded.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check, register - add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check, register - register.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check, register - add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int negate overflow check: 0 - op1, flags only (overflows for MIN_INT).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check: 0 - op1, flags only (overflows for MIN_LONG).
// NOTE(review): the zero operand is declared immI0 although OverflowSubL
// takes a long zero — confirm whether immL0 was intended before changing.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14532 
// Int multiply overflow check producing flags.  The 64-bit product
// (smull) overflows 32 bits iff it differs from its own 32->64 sign
// extension; that NE/EQ outcome is then converted into V set/clear by
// materializing 0x80000000 on overflow and subtracting 1 from it
// (0x80000000 - 1 sets V), so a generic VS/VC consumer works.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Int multiply overflow check feeding a branch directly; only matches
// when the If tests overflow/no_overflow, so the NE/EQ outcome of the
// sign-extension compare can be branched on without developing V.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check producing flags.  The 128-bit product is
// formed as mul (low 64) + smulh (high 64); it fits in 64 bits iff the
// high half equals the sign of the low half (low >> 63, arithmetic).
// The NE/EQ outcome is converted to V as in overflowMulI_reg above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Long multiply overflow check feeding a branch directly (see the int
// branch form above for the VS->NE / VC->EQ mapping).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14622 
14623 // ============================================================================
14624 // Compare Instructions
14625 
14626 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14627 %{
14628   match(Set cr (CmpI op1 op2));
14629 
14630   effect(DEF cr, USE op1, USE op2);
14631 
14632   ins_cost(INSN_COST);
14633   format %{ "cmpw  $op1, $op2" %}
14634 
14635   ins_encode(aarch64_enc_cmpw(op1, op2));
14636 
14637   ins_pipe(icmp_reg_reg);
14638 %}
14639 
14640 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14641 %{
14642   match(Set cr (CmpI op1 zero));
14643 
14644   effect(DEF cr, USE op1);
14645 
14646   ins_cost(INSN_COST);
14647   format %{ "cmpw $op1, 0" %}
14648 
14649   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14650 
14651   ins_pipe(icmp_reg_imm);
14652 %}
14653 
14654 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14655 %{
14656   match(Set cr (CmpI op1 op2));
14657 
14658   effect(DEF cr, USE op1);
14659 
14660   ins_cost(INSN_COST);
14661   format %{ "cmpw  $op1, $op2" %}
14662 
14663   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14664 
14665   ins_pipe(icmp_reg_imm);
14666 %}
14667 
14668 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14669 %{
14670   match(Set cr (CmpI op1 op2));
14671 
14672   effect(DEF cr, USE op1);
14673 
14674   ins_cost(INSN_COST * 2);
14675   format %{ "cmpw  $op1, $op2" %}
14676 
14677   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14678 
14679   ins_pipe(icmp_reg_imm);
14680 %}
14681 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.  (The same cmpw encoding is emitted; only the flags register
// class differs so consumers interpret the condition codes unsigned.)

// Unsigned int compare, register vs register.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (constant may
// need materializing, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14741 
// Signed long compare, register vs register (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may
// need materializing, hence the doubled cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14797 
// Unsigned long compare, register vs register.  Same cmp encoding as
// the signed form; the rFlagsRegU class makes consumers read the
// condition codes as unsigned.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate (constant may
// need materializing, hence the doubled cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14853 
// Pointer compare, register vs register (pointers compare unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register vs register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the null pointer constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14909 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register vs register (fcmps sets NZCV from the FP
// comparison, including the unordered case).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14928 
// Float compare against constant +0.0 using the fcmps-with-zero form.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the previous 0.0D spelling is a GCC-only
    // suffix extension and is rejected by clang.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double compare, register vs register (fcmpd sets NZCV from the FP
// comparison, including the unordered case).

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14957 
// Double compare against constant +0.0 using the fcmpd-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the previous 0.0D spelling is a GCC-only
    // suffix extension and is rejected by clang.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14971 
// Three-way float compare (CmpF3): dst := -1 / 0 / +1, branch-free.
// Unordered compares as "less" (dst = -1), matching Java fcmpl.
// (Removed a 'Label done' that was declared and bound but never
// branched to — dead code.)
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14999 
// Three-way double compare (CmpD3): dst := -1 / 0 / +1, branch-free.
// Unordered compares as "less" (dst = -1), matching Java dcmpl.
// (Removed a 'Label done' that was declared and bound but never
// branched to — dead code.)
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15026 
// Three-way float compare against +0.0: dst := -1 / 0 / +1.
// Fixes the non-standard 0.0D literal (GCC-only suffix, rejected by
// clang) and removes a dead 'Label done' that was never branched to.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15053 
// Three-way double compare against +0.0: dst := -1 / 0 / +1.
// Fixes the non-standard 0.0D literal (GCC-only suffix, rejected by
// clang) and removes a dead 'Label done' that was never branched to.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15079 
// CmpLTMask: dst := (p < q) ? -1 : 0.  csetw produces 0/1 from the LT
// flag, then subtracting from zero turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: the sign bit replicated by an arithmetic
// shift right by 31 is exactly the -1/0 mask, no flags needed.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15116 
15117 // ============================================================================
15118 // Max and Min
15119 
15120 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
15121 %{
15122   match(Set dst (MinI src1 src2));
15123 
15124   effect(DEF dst, USE src1, USE src2, KILL cr);
15125   size(8);
15126 
15127   ins_cost(INSN_COST * 3);
15128   format %{
15129     "cmpw $src1 $src2\t signed int\n\t"
15130     "cselw $dst, $src1, $src2 lt\t"
15131   %}
15132 
15133   ins_encode %{
15134     __ cmpw(as_Register($src1$$reg),
15135             as_Register($src2$$reg));
15136     __ cselw(as_Register($dst$$reg),
15137              as_Register($src1$$reg),
15138              as_Register($src2$$reg),
15139              Assembler::LT);
15140   %}
15141 
15142   ins_pipe(ialu_reg_reg);
15143 %}
// MaxI: signed compare then conditional select of the larger operand
// (branch-free; csel keeps src1 on GT, else src2).

instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15170 
15171 // ============================================================================
15172 // Branch Instructions
15173 
15174 // Direct Branch.
15175 instruct branch(label lbl)
15176 %{
15177   match(Goto);
15178 
15179   effect(USE lbl);
15180 
15181   ins_cost(BRANCH_COST);
15182   format %{ "b  $lbl" %}
15183 
15184   ins_encode(aarch64_enc_b(lbl));
15185 
15186   ins_pipe(pipe_branch);
15187 %}
15188 
15189 // Conditional Near Branch
15190 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
15191 %{
15192   // Same match rule as `branchConFar'.
15193   match(If cmp cr);
15194 
15195   effect(USE lbl);
15196 
15197   ins_cost(BRANCH_COST);
15198   // If set to 1 this indicates that the current instruction is a
15199   // short variant of a long branch. This avoids using this
15200   // instruction in first-pass matching. It will then only be used in
15201   // the `Shorten_branches' pass.
15202   // ins_short_branch(1);
15203   format %{ "b$cmp  $lbl" %}
15204 
15205   ins_encode(aarch64_enc_br_con(cmp, lbl));
15206 
15207   ins_pipe(pipe_branch_cond);
15208 %}
15209 
15210 // Conditional Near Branch Unsigned
15211 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
15212 %{
15213   // Same match rule as `branchConFar'.
15214   match(If cmp cr);
15215 
15216   effect(USE lbl);
15217 
15218   ins_cost(BRANCH_COST);
15219   // If set to 1 this indicates that the current instruction is a
15220   // short variant of a long branch. This avoids using this
15221   // instruction in first-pass matching. It will then only be used in
15222   // the `Shorten_branches' pass.
15223   // ins_short_branch(1);
15224   format %{ "b$cmp  $lbl\t# unsigned" %}
15225 
15226   ins_encode(aarch64_enc_br_conU(cmp, lbl));
15227 
15228   ins_pipe(pipe_branch_cond);
15229 %}
15230 
// Make use of CBZ and CBNZ.  These instructions, as well as being
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.

// int == 0 / != 0 branch via cbzw/cbnzw (32-bit).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// long == 0 / != 0 branch via cbz/cbnz (64-bit).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// pointer == null / != null branch via cbz/cbnz (64-bit).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// narrow oop == 0 / != 0 branch via cbzw/cbnzw (32-bit).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// DecodeN(oop) vs null: a narrow oop decodes to null iff it is zero,
// so the test can be done on the 32-bit compressed value directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15319 
// Unsigned int vs zero branch: conditions EQ and LS (unsigned <=) both
// hold exactly when the value is zero, so both map to cbzw; the
// remaining conditions map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long vs zero branch; same EQ/LS mapping as the int form.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15353 
// Test bit and Branch

// Patterns for short (< 32KiB) variants

// long < 0 / >= 0 is a test of the sign bit (bit 63): LT maps to
// tbnz (bit set), GE to tbz (bit clear).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// int < 0 / >= 0: test the sign bit (bit 31), same LT/GE mapping.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (long & single-bit-mask) == 0 / != 0: branch on that one bit via
// tbz/tbnz; only matches when the mask constant is a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (int & single-bit-mask) == 0 / != 0, same scheme as the long form.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15422 
15423 // And far variants
15424 instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
15425   match(If cmp (CmpL op1 op2));
15426   effect(USE labl);
15427 
15428   ins_cost(BRANCH_COST);
15429   format %{ "cb$cmp   $op1, $labl # long" %}
15430   ins_encode %{
15431     Label* L = $labl$$label;
15432     Assembler::Condition cond =
15433       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
15434     __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
15435   %}
15436   ins_pipe(pipe_cmp_branch);
15437 %}
15438 
15439 instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
15440   match(If cmp (CmpI op1 op2));
15441   effect(USE labl);
15442 
15443   ins_cost(BRANCH_COST);
15444   format %{ "cb$cmp   $op1, $labl # int" %}
15445   ins_encode %{
15446     Label* L = $labl$$label;
15447     Assembler::Condition cond =
15448       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
15449     __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
15450   %}
15451   ins_pipe(pipe_cmp_branch);
15452 %}
15453 
// Branch on a single bit of a long: matches (op1 & op2) ==/!= 0 where
// op2 is a power of two (enforced by the predicate), so the AND+compare
// collapses to one test-bit branch on that bit.  Far variant.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The mask is a power of two, so its log2 is the bit number to test.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15469 
// Branch on a single bit of an int: matches (op1 & op2) ==/!= 0 where
// op2 is a power of two, emitting one test-bit branch.  Far variant.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Power-of-two mask: log2 gives the bit number to test.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15485 
15486 // Test bits
15487 
// Set flags from (long op1 & immediate op2); only applies when op2 is
// encodable as a 64-bit logical immediate (checked by the predicate),
// so a single tst instruction suffices.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15500 
// Set flags from (int op1 & immediate op2); only applies when op2 is
// encodable as a 32-bit logical immediate (checked by the predicate),
// so a single tstw instruction suffices.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Print the 32-bit form (tstw) that is actually emitted, consistent
  // with cmpI_and_reg below.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15513 
// Set flags from (long op1 & register op2) with a single tst; register
// fallback for masks not encodable as logical immediates.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15524 
// Set flags from (int op1 & register op2) with a single tstw; register
// fallback for masks not encodable as logical immediates.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15535 
15536 
15537 // Conditional Far Branch
15538 // Conditional Far Branch Unsigned
15539 // TODO: fixme
15540 
15541 // counted loop end branch near
// Conditional branch closing a counted loop (signed condition codes);
// near variant, encoded via the shared aarch64_enc_br_con helper.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15557 
15558 // counted loop end branch near Unsigned
// Conditional branch closing a counted loop, unsigned condition codes;
// near variant, encoded via aarch64_enc_br_conU.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15574 
15575 // counted loop end branch far
15576 // counted loop end branch far unsigned
15577 // TODO: fixme
15578 
15579 // ============================================================================
15580 // inlined locking and unlocking
15581 
// Inlined monitor enter (FastLock): sets the flags register to report
// success/failure of the fast path.  tmp and tmp2 are scratch registers
// clobbered by the encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15596 
// Inlined monitor exit (FastUnlock): counterpart of cmpFastLock above;
// flags report the outcome, tmp/tmp2 are clobbered scratch.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15609 
15610 
15611 // ============================================================================
15612 // Safepoint Instructions
15613 
15614 // TODO
15615 // provide a near and far version of this code
15616 
// Safepoint poll: load from the polling page held in $poll; a protected
// page turns the load into a trap that the VM recognizes via the
// poll_type relocation.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15629 
15630 
15631 // ============================================================================
15632 // Procedure Call/Return Instructions
15633 
15634 // Call Java Static Instruction
15635 
// Direct call to a statically-bound Java method; the shared static-call
// encoding is followed by the standard call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15651 
15652 // TO HERE
15653 
15654 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (virtual/interface via inline
// cache); dynamic-call encoding followed by the standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15670 
15671 // Call Runtime Instruction
15672 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15687 
15688 // Call Runtime Instruction
15689 
// Leaf runtime call (no safepoint, no stack walking); same encoding as
// a regular runtime call on this port.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15704 
15705 // Call Runtime Instruction
15706 
// Leaf runtime call that does not use floating-point arguments; shares
// the java-to-runtime encoding.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15721 
15722 // Tail Call; Jump from runtime stub to Java code.
15723 // Also known as an 'interprocedural jump'.
15724 // Target of jump will eventually return to caller.
15725 // TailJump below removes the return address.
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15738 
// Tail jump used for exception forwarding: indirect branch with the
// exception oop pinned in r0 (iRegP_R0).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15751 
15752 // Create exception oop: created by stack-crawling runtime code.
15753 // Created exception is now available to this handler, and is setup
15754 // just prior to jumping to this handler. No code emitted.
15755 // TODO check
15756 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size: purely informs the register allocator that r0 holds
  // the exception oop at this point.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15769 
15770 // Rethrow exception: The exception oop will come in the first
15771 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15782 
15783 
15784 // Return Instruction
15785 // epilog node loads ret address into lr as part of frame pop
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15796 
15797 // Die now.
// Die now: emit a trapping instruction for paths that must never
// execute (Halt node).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15812 
15813 // ============================================================================
15814 // Partial Subtype Check
15815 //
// Search the subklass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (the cache is checked with exposed code in gen_subtype_check()).
// Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.
15820 
// Partial subtype check producing a result register: zero in $result on
// a hit, non-zero on a miss.  Operands are pinned to the fixed registers
// the shared encoding expects (sub=r4, super=r0, temp=r2, result=r5).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15835 
// Partial subtype check used only for its flags: matches the compare of
// the check's result against zero, so the result register need not be
// zeroed on a hit (opcode 0x0 below).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15850 
// String.compareTo intrinsic, both strings UTF-16 (UU encoding); no
// vector temps needed since the element widths match (fnoreg passed).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15868 
// String.compareTo intrinsic, both strings Latin-1 (LL encoding); same
// shape as string_compareU with a different encoding constant.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15885 
// String.compareTo intrinsic, str1 UTF-16 vs str2 Latin-1 (UL); mixed
// widths require the two vector temps for inflation during compare.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15902 
// String.compareTo intrinsic, str1 Latin-1 vs str2 UTF-16 (LU); the
// mirror case of string_compareUL above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15919 
// String.indexOf intrinsic with a variable-length UTF-16 needle (UU);
// -1 tells the stub the needle length is in a register (cnt2), not a
// compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15938 
// String.indexOf intrinsic with a variable-length Latin-1 needle (LL);
// needle length passed in cnt2 (constant slot is -1).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15957 
// String.indexOf intrinsic, UTF-16 haystack / Latin-1 needle (UL),
// variable-length needle in cnt2.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15976 
// String.indexOf intrinsic, Latin-1 haystack / UTF-16 needle (LU),
// variable-length needle in cnt2.
instruct string_indexofLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15995 
// String.indexOf with a small compile-time-constant needle length
// (immI_le_4), UU encoding: the constant is passed to the stub and the
// cnt2 register slot is zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16016 
// String.indexOf with a small constant needle length (immI_le_4), LL
// encoding; constant length goes to the stub, cnt2 register slot is zr.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16037 
// String.indexOf with a constant needle length of exactly 1 (immI_1),
// UL encoding (mixed widths restrict the constant form to length 1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16058 
// String.indexOf with a constant needle length of exactly 1 (immI_1),
// LU encoding; mirror of string_indexof_conUL.
instruct string_indexof_conLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16079 
// indexOf(char) intrinsic: search a UTF-16 string for a single char
// value held in a register.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16097 
// String.equals intrinsic, Latin-1 encoding (element size 1 byte).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16113 
// String.equals intrinsic, UTF-16 encoding (element size 2 bytes).
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16129 
// Arrays.equals intrinsic for byte[] (element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
16146 
// Arrays.equals intrinsic for char[] (element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16163 
// StringCoding.hasNegatives intrinsic: report whether any byte in the
// array has its sign bit set.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16174 
16175 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// (StrCompressedCopy intrinsic; the four V0-V3 vector temps are
// clobbered by the SIMD compression loop.)
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16193 
16194 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// (StrInflatedCopy intrinsic; produces no value, so it sets the dummy
// Universe operand.)
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16208 
16209 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// (EncodeISOArray intrinsic; result is the number of characters
// encoded, V0-V3 vector temps are clobbered.)
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16227 
16228 // ============================================================================
16229 // This name is KNOWN by the ADLC and cannot be changed.
16230 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16231 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated
// thread register (thread_RegP), so this emits no code at all.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16246 
16247 // ====================VECTOR INSTRUCTIONS=====================================
16248 
16249 // Load vector (32 bits)
// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16259 
16260 // Load vector (64 bits)
// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16270 
16271 // Load Vector (128 bits)
// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16281 
16282 // Store Vector (32 bits)
16283 instruct storeV4(vecD src, vmem4 mem)
16284 %{
16285   predicate(n->as_StoreVector()->memory_size() == 4);
16286   match(Set mem (StoreVector mem src));
16287   ins_cost(4 * INSN_COST);
16288   format %{ "strs   $mem,$src\t# vector (32 bits)" %}
16289   ins_encode( aarch64_enc_strvS(src, mem) );
16290   ins_pipe(vstore_reg_mem64);
16291 %}
16292 
16293 // Store Vector (64 bits)
16294 instruct storeV8(vecD src, vmem8 mem)
16295 %{
16296   predicate(n->as_StoreVector()->memory_size() == 8);
16297   match(Set mem (StoreVector mem src));
16298   ins_cost(4 * INSN_COST);
16299   format %{ "strd   $mem,$src\t# vector (64 bits)" %}
16300   ins_encode( aarch64_enc_strvD(src, mem) );
16301   ins_pipe(vstore_reg_mem64);
16302 %}
16303 
16304 // Store Vector (128 bits)
16305 instruct storeV16(vecX src, vmem16 mem)
16306 %{
16307   predicate(n->as_StoreVector()->memory_size() == 16);
16308   match(Set mem (StoreVector mem src));
16309   ins_cost(4 * INSN_COST);
16310   format %{ "strq   $mem,$src\t# vector (128 bits)" %}
16311   ins_encode( aarch64_enc_strvQ(src, mem) );
16312   ins_pipe(vstore_reg_mem128);
16313 %}
16314 
// Replicate (broadcast) a scalar register or immediate into every lane
// of a SIMD register.  The 64-bit (vecD) forms also match shorter
// vectors (e.g. the 8B rule handles length 4 or 8).  Immediate forms
// mask the constant down to the lane width before emitting movi.

instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Mask to a byte so the immediate fits the 8-bit lane.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Mask to 16 bits for the halfword lanes.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    // 32-bit lanes: no masking needed, the int constant fits as-is.
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16474 
// Replicate zero into all lanes of a 128-bit vector.  Despite the name,
// this matches ReplicateI of the zero constant (2-lane predicate); the
// result is all-zero bits either way.  The zero is materialized with
// EOR dst,dst,dst rather than MOVI, and the format string now reflects
// the instruction actually emitted (it previously claimed "movi").
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "eor  $dst, $dst, $dst\t# vector (4I) replicate $zero" %}
  ins_encode %{
    // xor of a register with itself yields all-zero lanes.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16488 
// Floating-point replicate: broadcast lane 0 of an FP source register
// into every lane of the destination with dup.

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16527 
16528 // ====================REDUCTION ARITHMETIC====================================
16529 
16530 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
16531 %{
16532   match(Set dst (AddReductionVI src1 src2));
16533   ins_cost(INSN_COST);
16534   effect(TEMP tmp, TEMP tmp2);
16535   format %{ "umov  $tmp, $src2, S, 0\n\t"
16536             "umov  $tmp2, $src2, S, 1\n\t"
16537             "addw  $dst, $src1, $tmp\n\t"
16538             "addw  $dst, $dst, $tmp2\t add reduction2i"
16539   %}
16540   ins_encode %{
16541     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
16542     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
16543     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
16544     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
16545   %}
16546   ins_pipe(pipe_class_default);
16547 %}
16548 
16549 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
16550 %{
16551   match(Set dst (AddReductionVI src1 src2));
16552   ins_cost(INSN_COST);
16553   effect(TEMP tmp, TEMP tmp2);
16554   format %{ "addv  $tmp, T4S, $src2\n\t"
16555             "umov  $tmp2, $tmp, S, 0\n\t"
16556             "addw  $dst, $tmp2, $src1\t add reduction4i"
16557   %}
16558   ins_encode %{
16559     __ addv(as_FloatRegister($tmp$$reg), __ T4S,
16560             as_FloatRegister($src2$$reg));
16561     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
16562     __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
16563   %}
16564   ins_pipe(pipe_class_default);
16565 %}
16566 
// Integer multiply reduction, 2 lanes:
//   dst = (src1 * src2[0]) * src2[1], extracting each lane with umov.
// Fix: dropped a stray trailing "\n\t" after the final format line,
// which printed a dangling blank continuation in disassembly output.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16585 
// Integer multiply reduction, 4 lanes.  The upper 64 bits of src2 are
// moved down with ins and multiplied lanewise against the lower half,
// halving the problem to two lanes; the remaining two partial products
// are extracted with umov and folded in with scalar muls.
// Fix: dropped a stray trailing "\n\t" after the final format line.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    // tmp[0:63] = src2[64:127]
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    // tmp = {src2[0]*src2[2], src2[1]*src2[3]}
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16610 
// Float add reduction.  Lanes are accumulated one at a time with scalar
// fadds (lane N is first moved into lane 0 of tmp with ins), rather
// than a pairwise vector reduce -- presumably to keep a fixed
// left-to-right evaluation order; NOTE(review): confirm against the
// matcher's FP-reduction ordering requirements.

instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    // dst = src1 + src2[0], then fold in lanes 1..3 in order.
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16662 
// Float multiply reduction, 2 lanes:
//   dst = (src1 * src2[0]) * src2[1]
// Fix: the trailing format tag previously said "add reduction4f"; this
// is a 2-lane multiply reduction.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Move lane 1 of src2 into lane 0 of tmp so scalar fmuls can use it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16682 
// Float multiply reduction, 4 lanes: src1 multiplied by each lane of
// src2 in turn (lane N moved to lane 0 of tmp with ins first).
// Fix: the trailing format tag previously said "add reduction4f"; this
// is a multiply reduction.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16714 
// Double add reduction: dst = (src1 + src2[0]) + src2[1].
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Move lane 1 of src2 into lane 0 of tmp for the scalar add.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16734 
// Double multiply reduction: dst = (src1 * src2[0]) * src2[1].
// Fix: the trailing format tag previously said "add reduction2d"; this
// is a multiply reduction.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Move lane 1 of src2 into lane 0 of tmp for the scalar multiply.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16754 
16755 // ====================VECTOR ARITHMETIC=======================================
16756 
16757 // --------------------------------- ADD --------------------------------------
16758 
16759 instruct vadd8B(vecD dst, vecD src1, vecD src2)
16760 %{
16761   predicate(n->as_Vector()->length() == 4 ||
16762             n->as_Vector()->length() == 8);
16763   match(Set dst (AddVB src1 src2));
16764   ins_cost(INSN_COST);
16765   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
16766   ins_encode %{
16767     __ addv(as_FloatRegister($dst$$reg), __ T8B,
16768             as_FloatRegister($src1$$reg),
16769             as_FloatRegister($src2$$reg));
16770   %}
16771   ins_pipe(vdop64);
16772 %}
16773 
16774 instruct vadd16B(vecX dst, vecX src1, vecX src2)
16775 %{
16776   predicate(n->as_Vector()->length() == 16);
16777   match(Set dst (AddVB src1 src2));
16778   ins_cost(INSN_COST);
16779   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
16780   ins_encode %{
16781     __ addv(as_FloatRegister($dst$$reg), __ T16B,
16782             as_FloatRegister($src1$$reg),
16783             as_FloatRegister($src2$$reg));
16784   %}
16785   ins_pipe(vdop128);
16786 %}
16787 
16788 instruct vadd4S(vecD dst, vecD src1, vecD src2)
16789 %{
16790   predicate(n->as_Vector()->length() == 2 ||
16791             n->as_Vector()->length() == 4);
16792   match(Set dst (AddVS src1 src2));
16793   ins_cost(INSN_COST);
16794   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
16795   ins_encode %{
16796     __ addv(as_FloatRegister($dst$$reg), __ T4H,
16797             as_FloatRegister($src1$$reg),
16798             as_FloatRegister($src2$$reg));
16799   %}
16800   ins_pipe(vdop64);
16801 %}
16802 
16803 instruct vadd8S(vecX dst, vecX src1, vecX src2)
16804 %{
16805   predicate(n->as_Vector()->length() == 8);
16806   match(Set dst (AddVS src1 src2));
16807   ins_cost(INSN_COST);
16808   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
16809   ins_encode %{
16810     __ addv(as_FloatRegister($dst$$reg), __ T8H,
16811             as_FloatRegister($src1$$reg),
16812             as_FloatRegister($src2$$reg));
16813   %}
16814   ins_pipe(vdop128);
16815 %}
16816 
16817 instruct vadd2I(vecD dst, vecD src1, vecD src2)
16818 %{
16819   predicate(n->as_Vector()->length() == 2);
16820   match(Set dst (AddVI src1 src2));
16821   ins_cost(INSN_COST);
16822   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
16823   ins_encode %{
16824     __ addv(as_FloatRegister($dst$$reg), __ T2S,
16825             as_FloatRegister($src1$$reg),
16826             as_FloatRegister($src2$$reg));
16827   %}
16828   ins_pipe(vdop64);
16829 %}
16830 
16831 instruct vadd4I(vecX dst, vecX src1, vecX src2)
16832 %{
16833   predicate(n->as_Vector()->length() == 4);
16834   match(Set dst (AddVI src1 src2));
16835   ins_cost(INSN_COST);
16836   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
16837   ins_encode %{
16838     __ addv(as_FloatRegister($dst$$reg), __ T4S,
16839             as_FloatRegister($src1$$reg),
16840             as_FloatRegister($src2$$reg));
16841   %}
16842   ins_pipe(vdop128);
16843 %}
16844 
16845 instruct vadd2L(vecX dst, vecX src1, vecX src2)
16846 %{
16847   predicate(n->as_Vector()->length() == 2);
16848   match(Set dst (AddVL src1 src2));
16849   ins_cost(INSN_COST);
16850   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
16851   ins_encode %{
16852     __ addv(as_FloatRegister($dst$$reg), __ T2D,
16853             as_FloatRegister($src1$$reg),
16854             as_FloatRegister($src2$$reg));
16855   %}
16856   ins_pipe(vdop128);
16857 %}
16858 
16859 instruct vadd2F(vecD dst, vecD src1, vecD src2)
16860 %{
16861   predicate(n->as_Vector()->length() == 2);
16862   match(Set dst (AddVF src1 src2));
16863   ins_cost(INSN_COST);
16864   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
16865   ins_encode %{
16866     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
16867             as_FloatRegister($src1$$reg),
16868             as_FloatRegister($src2$$reg));
16869   %}
16870   ins_pipe(vdop_fp64);
16871 %}
16872 
16873 instruct vadd4F(vecX dst, vecX src1, vecX src2)
16874 %{
16875   predicate(n->as_Vector()->length() == 4);
16876   match(Set dst (AddVF src1 src2));
16877   ins_cost(INSN_COST);
16878   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
16879   ins_encode %{
16880     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
16881             as_FloatRegister($src1$$reg),
16882             as_FloatRegister($src2$$reg));
16883   %}
16884   ins_pipe(vdop_fp128);
16885 %}
16886 
// Vector add, two doubles (128-bit).  A length predicate is added for
// consistency with vadd2L/vsub2D/vmul2D; AddVD on a vecX is always a
// 2-lane vector, so this does not change what is matched.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16899 
16900 // --------------------------------- SUB --------------------------------------
16901 
16902 instruct vsub8B(vecD dst, vecD src1, vecD src2)
16903 %{
16904   predicate(n->as_Vector()->length() == 4 ||
16905             n->as_Vector()->length() == 8);
16906   match(Set dst (SubVB src1 src2));
16907   ins_cost(INSN_COST);
16908   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
16909   ins_encode %{
16910     __ subv(as_FloatRegister($dst$$reg), __ T8B,
16911             as_FloatRegister($src1$$reg),
16912             as_FloatRegister($src2$$reg));
16913   %}
16914   ins_pipe(vdop64);
16915 %}
16916 
16917 instruct vsub16B(vecX dst, vecX src1, vecX src2)
16918 %{
16919   predicate(n->as_Vector()->length() == 16);
16920   match(Set dst (SubVB src1 src2));
16921   ins_cost(INSN_COST);
16922   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
16923   ins_encode %{
16924     __ subv(as_FloatRegister($dst$$reg), __ T16B,
16925             as_FloatRegister($src1$$reg),
16926             as_FloatRegister($src2$$reg));
16927   %}
16928   ins_pipe(vdop128);
16929 %}
16930 
16931 instruct vsub4S(vecD dst, vecD src1, vecD src2)
16932 %{
16933   predicate(n->as_Vector()->length() == 2 ||
16934             n->as_Vector()->length() == 4);
16935   match(Set dst (SubVS src1 src2));
16936   ins_cost(INSN_COST);
16937   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
16938   ins_encode %{
16939     __ subv(as_FloatRegister($dst$$reg), __ T4H,
16940             as_FloatRegister($src1$$reg),
16941             as_FloatRegister($src2$$reg));
16942   %}
16943   ins_pipe(vdop64);
16944 %}
16945 
16946 instruct vsub8S(vecX dst, vecX src1, vecX src2)
16947 %{
16948   predicate(n->as_Vector()->length() == 8);
16949   match(Set dst (SubVS src1 src2));
16950   ins_cost(INSN_COST);
16951   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
16952   ins_encode %{
16953     __ subv(as_FloatRegister($dst$$reg), __ T8H,
16954             as_FloatRegister($src1$$reg),
16955             as_FloatRegister($src2$$reg));
16956   %}
16957   ins_pipe(vdop128);
16958 %}
16959 
16960 instruct vsub2I(vecD dst, vecD src1, vecD src2)
16961 %{
16962   predicate(n->as_Vector()->length() == 2);
16963   match(Set dst (SubVI src1 src2));
16964   ins_cost(INSN_COST);
16965   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
16966   ins_encode %{
16967     __ subv(as_FloatRegister($dst$$reg), __ T2S,
16968             as_FloatRegister($src1$$reg),
16969             as_FloatRegister($src2$$reg));
16970   %}
16971   ins_pipe(vdop64);
16972 %}
16973 
16974 instruct vsub4I(vecX dst, vecX src1, vecX src2)
16975 %{
16976   predicate(n->as_Vector()->length() == 4);
16977   match(Set dst (SubVI src1 src2));
16978   ins_cost(INSN_COST);
16979   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
16980   ins_encode %{
16981     __ subv(as_FloatRegister($dst$$reg), __ T4S,
16982             as_FloatRegister($src1$$reg),
16983             as_FloatRegister($src2$$reg));
16984   %}
16985   ins_pipe(vdop128);
16986 %}
16987 
16988 instruct vsub2L(vecX dst, vecX src1, vecX src2)
16989 %{
16990   predicate(n->as_Vector()->length() == 2);
16991   match(Set dst (SubVL src1 src2));
16992   ins_cost(INSN_COST);
16993   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
16994   ins_encode %{
16995     __ subv(as_FloatRegister($dst$$reg), __ T2D,
16996             as_FloatRegister($src1$$reg),
16997             as_FloatRegister($src2$$reg));
16998   %}
16999   ins_pipe(vdop128);
17000 %}
17001 
17002 instruct vsub2F(vecD dst, vecD src1, vecD src2)
17003 %{
17004   predicate(n->as_Vector()->length() == 2);
17005   match(Set dst (SubVF src1 src2));
17006   ins_cost(INSN_COST);
17007   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
17008   ins_encode %{
17009     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
17010             as_FloatRegister($src1$$reg),
17011             as_FloatRegister($src2$$reg));
17012   %}
17013   ins_pipe(vdop_fp64);
17014 %}
17015 
17016 instruct vsub4F(vecX dst, vecX src1, vecX src2)
17017 %{
17018   predicate(n->as_Vector()->length() == 4);
17019   match(Set dst (SubVF src1 src2));
17020   ins_cost(INSN_COST);
17021   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
17022   ins_encode %{
17023     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
17024             as_FloatRegister($src1$$reg),
17025             as_FloatRegister($src2$$reg));
17026   %}
17027   ins_pipe(vdop_fp128);
17028 %}
17029 
17030 instruct vsub2D(vecX dst, vecX src1, vecX src2)
17031 %{
17032   predicate(n->as_Vector()->length() == 2);
17033   match(Set dst (SubVD src1 src2));
17034   ins_cost(INSN_COST);
17035   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
17036   ins_encode %{
17037     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
17038             as_FloatRegister($src1$$reg),
17039             as_FloatRegister($src2$$reg));
17040   %}
17041   ins_pipe(vdop_fp128);
17042 %}
17043 
17044 // --------------------------------- MUL --------------------------------------
17045 
17046 instruct vmul4S(vecD dst, vecD src1, vecD src2)
17047 %{
17048   predicate(n->as_Vector()->length() == 2 ||
17049             n->as_Vector()->length() == 4);
17050   match(Set dst (MulVS src1 src2));
17051   ins_cost(INSN_COST);
17052   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
17053   ins_encode %{
17054     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
17055             as_FloatRegister($src1$$reg),
17056             as_FloatRegister($src2$$reg));
17057   %}
17058   ins_pipe(vmul64);
17059 %}
17060 
17061 instruct vmul8S(vecX dst, vecX src1, vecX src2)
17062 %{
17063   predicate(n->as_Vector()->length() == 8);
17064   match(Set dst (MulVS src1 src2));
17065   ins_cost(INSN_COST);
17066   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
17067   ins_encode %{
17068     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
17069             as_FloatRegister($src1$$reg),
17070             as_FloatRegister($src2$$reg));
17071   %}
17072   ins_pipe(vmul128);
17073 %}
17074 
17075 instruct vmul2I(vecD dst, vecD src1, vecD src2)
17076 %{
17077   predicate(n->as_Vector()->length() == 2);
17078   match(Set dst (MulVI src1 src2));
17079   ins_cost(INSN_COST);
17080   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
17081   ins_encode %{
17082     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
17083             as_FloatRegister($src1$$reg),
17084             as_FloatRegister($src2$$reg));
17085   %}
17086   ins_pipe(vmul64);
17087 %}
17088 
17089 instruct vmul4I(vecX dst, vecX src1, vecX src2)
17090 %{
17091   predicate(n->as_Vector()->length() == 4);
17092   match(Set dst (MulVI src1 src2));
17093   ins_cost(INSN_COST);
17094   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
17095   ins_encode %{
17096     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
17097             as_FloatRegister($src1$$reg),
17098             as_FloatRegister($src2$$reg));
17099   %}
17100   ins_pipe(vmul128);
17101 %}
17102 
17103 instruct vmul2F(vecD dst, vecD src1, vecD src2)
17104 %{
17105   predicate(n->as_Vector()->length() == 2);
17106   match(Set dst (MulVF src1 src2));
17107   ins_cost(INSN_COST);
17108   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
17109   ins_encode %{
17110     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
17111             as_FloatRegister($src1$$reg),
17112             as_FloatRegister($src2$$reg));
17113   %}
17114   ins_pipe(vmuldiv_fp64);
17115 %}
17116 
17117 instruct vmul4F(vecX dst, vecX src1, vecX src2)
17118 %{
17119   predicate(n->as_Vector()->length() == 4);
17120   match(Set dst (MulVF src1 src2));
17121   ins_cost(INSN_COST);
17122   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
17123   ins_encode %{
17124     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
17125             as_FloatRegister($src1$$reg),
17126             as_FloatRegister($src2$$reg));
17127   %}
17128   ins_pipe(vmuldiv_fp128);
17129 %}
17130 
17131 instruct vmul2D(vecX dst, vecX src1, vecX src2)
17132 %{
17133   predicate(n->as_Vector()->length() == 2);
17134   match(Set dst (MulVD src1 src2));
17135   ins_cost(INSN_COST);
17136   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
17137   ins_encode %{
17138     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
17139             as_FloatRegister($src1$$reg),
17140             as_FloatRegister($src2$$reg));
17141   %}
17142   ins_pipe(vmuldiv_fp128);
17143 %}
17144 
17145 // --------------------------------- MLA --------------------------------------
17146 
17147 instruct vmla4S(vecD dst, vecD src1, vecD src2)
17148 %{
17149   predicate(n->as_Vector()->length() == 2 ||
17150             n->as_Vector()->length() == 4);
17151   match(Set dst (AddVS dst (MulVS src1 src2)));
17152   ins_cost(INSN_COST);
17153   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
17154   ins_encode %{
17155     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
17156             as_FloatRegister($src1$$reg),
17157             as_FloatRegister($src2$$reg));
17158   %}
17159   ins_pipe(vmla64);
17160 %}
17161 
17162 instruct vmla8S(vecX dst, vecX src1, vecX src2)
17163 %{
17164   predicate(n->as_Vector()->length() == 8);
17165   match(Set dst (AddVS dst (MulVS src1 src2)));
17166   ins_cost(INSN_COST);
17167   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
17168   ins_encode %{
17169     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
17170             as_FloatRegister($src1$$reg),
17171             as_FloatRegister($src2$$reg));
17172   %}
17173   ins_pipe(vmla128);
17174 %}
17175 
17176 instruct vmla2I(vecD dst, vecD src1, vecD src2)
17177 %{
17178   predicate(n->as_Vector()->length() == 2);
17179   match(Set dst (AddVI dst (MulVI src1 src2)));
17180   ins_cost(INSN_COST);
17181   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
17182   ins_encode %{
17183     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
17184             as_FloatRegister($src1$$reg),
17185             as_FloatRegister($src2$$reg));
17186   %}
17187   ins_pipe(vmla64);
17188 %}
17189 
17190 instruct vmla4I(vecX dst, vecX src1, vecX src2)
17191 %{
17192   predicate(n->as_Vector()->length() == 4);
17193   match(Set dst (AddVI dst (MulVI src1 src2)));
17194   ins_cost(INSN_COST);
17195   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
17196   ins_encode %{
17197     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
17198             as_FloatRegister($src1$$reg),
17199             as_FloatRegister($src2$$reg));
17200   %}
17201   ins_pipe(vmla128);
17202 %}
17203 
17204 // dst + src1 * src2
17205 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
17206   predicate(UseFMA && n->as_Vector()->length() == 2);
17207   match(Set dst (FmaVF  dst (Binary src1 src2)));
17208   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
17209   ins_cost(INSN_COST);
17210   ins_encode %{
17211     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
17212             as_FloatRegister($src1$$reg),
17213             as_FloatRegister($src2$$reg));
17214   %}
17215   ins_pipe(vmuldiv_fp64);
17216 %}
17217 
17218 // dst + src1 * src2
17219 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
17220   predicate(UseFMA && n->as_Vector()->length() == 4);
17221   match(Set dst (FmaVF  dst (Binary src1 src2)));
17222   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
17223   ins_cost(INSN_COST);
17224   ins_encode %{
17225     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
17226             as_FloatRegister($src1$$reg),
17227             as_FloatRegister($src2$$reg));
17228   %}
17229   ins_pipe(vmuldiv_fp128);
17230 %}
17231 
17232 // dst + src1 * src2
17233 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
17234   predicate(UseFMA && n->as_Vector()->length() == 2);
17235   match(Set dst (FmaVD  dst (Binary src1 src2)));
17236   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
17237   ins_cost(INSN_COST);
17238   ins_encode %{
17239     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
17240             as_FloatRegister($src1$$reg),
17241             as_FloatRegister($src2$$reg));
17242   %}
17243   ins_pipe(vmuldiv_fp128);
17244 %}
17245 
17246 // --------------------------------- MLS --------------------------------------
17247 
17248 instruct vmls4S(vecD dst, vecD src1, vecD src2)
17249 %{
17250   predicate(n->as_Vector()->length() == 2 ||
17251             n->as_Vector()->length() == 4);
17252   match(Set dst (SubVS dst (MulVS src1 src2)));
17253   ins_cost(INSN_COST);
17254   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
17255   ins_encode %{
17256     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
17257             as_FloatRegister($src1$$reg),
17258             as_FloatRegister($src2$$reg));
17259   %}
17260   ins_pipe(vmla64);
17261 %}
17262 
17263 instruct vmls8S(vecX dst, vecX src1, vecX src2)
17264 %{
17265   predicate(n->as_Vector()->length() == 8);
17266   match(Set dst (SubVS dst (MulVS src1 src2)));
17267   ins_cost(INSN_COST);
17268   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
17269   ins_encode %{
17270     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
17271             as_FloatRegister($src1$$reg),
17272             as_FloatRegister($src2$$reg));
17273   %}
17274   ins_pipe(vmla128);
17275 %}
17276 
17277 instruct vmls2I(vecD dst, vecD src1, vecD src2)
17278 %{
17279   predicate(n->as_Vector()->length() == 2);
17280   match(Set dst (SubVI dst (MulVI src1 src2)));
17281   ins_cost(INSN_COST);
17282   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
17283   ins_encode %{
17284     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
17285             as_FloatRegister($src1$$reg),
17286             as_FloatRegister($src2$$reg));
17287   %}
17288   ins_pipe(vmla64);
17289 %}
17290 
17291 instruct vmls4I(vecX dst, vecX src1, vecX src2)
17292 %{
17293   predicate(n->as_Vector()->length() == 4);
17294   match(Set dst (SubVI dst (MulVI src1 src2)));
17295   ins_cost(INSN_COST);
17296   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
17297   ins_encode %{
17298     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
17299             as_FloatRegister($src1$$reg),
17300             as_FloatRegister($src2$$reg));
17301   %}
17302   ins_pipe(vmla128);
17303 %}
17304 
17305 // dst - src1 * src2
17306 instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
17307   predicate(UseFMA && n->as_Vector()->length() == 2);
17308   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17309   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17310   format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
17311   ins_cost(INSN_COST);
17312   ins_encode %{
17313     __ fmls(as_FloatRegister($dst$$reg), __ T2S,
17314             as_FloatRegister($src1$$reg),
17315             as_FloatRegister($src2$$reg));
17316   %}
17317   ins_pipe(vmuldiv_fp64);
17318 %}
17319 
17320 // dst - src1 * src2
17321 instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
17322   predicate(UseFMA && n->as_Vector()->length() == 4);
17323   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17324   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17325   format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
17326   ins_cost(INSN_COST);
17327   ins_encode %{
17328     __ fmls(as_FloatRegister($dst$$reg), __ T4S,
17329             as_FloatRegister($src1$$reg),
17330             as_FloatRegister($src2$$reg));
17331   %}
17332   ins_pipe(vmuldiv_fp128);
17333 %}
17334 
17335 // dst - src1 * src2
17336 instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
17337   predicate(UseFMA && n->as_Vector()->length() == 2);
17338   match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
17339   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
17340   format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
17341   ins_cost(INSN_COST);
17342   ins_encode %{
17343     __ fmls(as_FloatRegister($dst$$reg), __ T2D,
17344             as_FloatRegister($src1$$reg),
17345             as_FloatRegister($src2$$reg));
17346   %}
17347   ins_pipe(vmuldiv_fp128);
17348 %}
17349 
17350 // --------------------------------- DIV --------------------------------------
17351 
17352 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
17353 %{
17354   predicate(n->as_Vector()->length() == 2);
17355   match(Set dst (DivVF src1 src2));
17356   ins_cost(INSN_COST);
17357   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
17358   ins_encode %{
17359     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
17360             as_FloatRegister($src1$$reg),
17361             as_FloatRegister($src2$$reg));
17362   %}
17363   ins_pipe(vmuldiv_fp64);
17364 %}
17365 
17366 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
17367 %{
17368   predicate(n->as_Vector()->length() == 4);
17369   match(Set dst (DivVF src1 src2));
17370   ins_cost(INSN_COST);
17371   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
17372   ins_encode %{
17373     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
17374             as_FloatRegister($src1$$reg),
17375             as_FloatRegister($src2$$reg));
17376   %}
17377   ins_pipe(vmuldiv_fp128);
17378 %}
17379 
17380 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
17381 %{
17382   predicate(n->as_Vector()->length() == 2);
17383   match(Set dst (DivVD src1 src2));
17384   ins_cost(INSN_COST);
17385   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
17386   ins_encode %{
17387     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
17388             as_FloatRegister($src1$$reg),
17389             as_FloatRegister($src2$$reg));
17390   %}
17391   ins_pipe(vmuldiv_fp128);
17392 %}
17393 
17394 // --------------------------------- SQRT -------------------------------------
17395 
17396 instruct vsqrt2D(vecX dst, vecX src)
17397 %{
17398   predicate(n->as_Vector()->length() == 2);
17399   match(Set dst (SqrtVD src));
17400   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
17401   ins_encode %{
17402     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
17403              as_FloatRegister($src$$reg));
17404   %}
17405   ins_pipe(vsqrt_fp128);
17406 %}
17407 
17408 // --------------------------------- ABS --------------------------------------
17409 
17410 instruct vabs2F(vecD dst, vecD src)
17411 %{
17412   predicate(n->as_Vector()->length() == 2);
17413   match(Set dst (AbsVF src));
17414   ins_cost(INSN_COST * 3);
17415   format %{ "fabs  $dst,$src\t# vector (2S)" %}
17416   ins_encode %{
17417     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
17418             as_FloatRegister($src$$reg));
17419   %}
17420   ins_pipe(vunop_fp64);
17421 %}
17422 
17423 instruct vabs4F(vecX dst, vecX src)
17424 %{
17425   predicate(n->as_Vector()->length() == 4);
17426   match(Set dst (AbsVF src));
17427   ins_cost(INSN_COST * 3);
17428   format %{ "fabs  $dst,$src\t# vector (4S)" %}
17429   ins_encode %{
17430     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
17431             as_FloatRegister($src$$reg));
17432   %}
17433   ins_pipe(vunop_fp128);
17434 %}
17435 
17436 instruct vabs2D(vecX dst, vecX src)
17437 %{
17438   predicate(n->as_Vector()->length() == 2);
17439   match(Set dst (AbsVD src));
17440   ins_cost(INSN_COST * 3);
17441   format %{ "fabs  $dst,$src\t# vector (2D)" %}
17442   ins_encode %{
17443     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
17444             as_FloatRegister($src$$reg));
17445   %}
17446   ins_pipe(vunop_fp128);
17447 %}
17448 
17449 // --------------------------------- NEG --------------------------------------
17450 
17451 instruct vneg2F(vecD dst, vecD src)
17452 %{
17453   predicate(n->as_Vector()->length() == 2);
17454   match(Set dst (NegVF src));
17455   ins_cost(INSN_COST * 3);
17456   format %{ "fneg  $dst,$src\t# vector (2S)" %}
17457   ins_encode %{
17458     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
17459             as_FloatRegister($src$$reg));
17460   %}
17461   ins_pipe(vunop_fp64);
17462 %}
17463 
17464 instruct vneg4F(vecX dst, vecX src)
17465 %{
17466   predicate(n->as_Vector()->length() == 4);
17467   match(Set dst (NegVF src));
17468   ins_cost(INSN_COST * 3);
17469   format %{ "fneg  $dst,$src\t# vector (4S)" %}
17470   ins_encode %{
17471     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
17472             as_FloatRegister($src$$reg));
17473   %}
17474   ins_pipe(vunop_fp128);
17475 %}
17476 
17477 instruct vneg2D(vecX dst, vecX src)
17478 %{
17479   predicate(n->as_Vector()->length() == 2);
17480   match(Set dst (NegVD src));
17481   ins_cost(INSN_COST * 3);
17482   format %{ "fneg  $dst,$src\t# vector (2D)" %}
17483   ins_encode %{
17484     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
17485             as_FloatRegister($src$$reg));
17486   %}
17487   ins_pipe(vunop_fp128);
17488 %}
17489 
17490 // --------------------------------- AND --------------------------------------
17491 
17492 instruct vand8B(vecD dst, vecD src1, vecD src2)
17493 %{
17494   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17495             n->as_Vector()->length_in_bytes() == 8);
17496   match(Set dst (AndV src1 src2));
17497   ins_cost(INSN_COST);
17498   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
17499   ins_encode %{
17500     __ andr(as_FloatRegister($dst$$reg), __ T8B,
17501             as_FloatRegister($src1$$reg),
17502             as_FloatRegister($src2$$reg));
17503   %}
17504   ins_pipe(vlogical64);
17505 %}
17506 
17507 instruct vand16B(vecX dst, vecX src1, vecX src2)
17508 %{
17509   predicate(n->as_Vector()->length_in_bytes() == 16);
17510   match(Set dst (AndV src1 src2));
17511   ins_cost(INSN_COST);
17512   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
17513   ins_encode %{
17514     __ andr(as_FloatRegister($dst$$reg), __ T16B,
17515             as_FloatRegister($src1$$reg),
17516             as_FloatRegister($src2$$reg));
17517   %}
17518   ins_pipe(vlogical128);
17519 %}
17520 
17521 // --------------------------------- OR ---------------------------------------
17522 
// Bitwise OR, 64-bit form; 4- or 8-byte vectors both fit a D register.
// Fix: the format string previously read "and" — this rule matches OrV and
// encodes orr, so the disassembly text now says "orr" (consistent with vor16B).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17537 
// Bitwise OR, 128-bit (16-byte) form.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17551 
17552 // --------------------------------- XOR --------------------------------------
17553 
17554 instruct vxor8B(vecD dst, vecD src1, vecD src2)
17555 %{
17556   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17557             n->as_Vector()->length_in_bytes() == 8);
17558   match(Set dst (XorV src1 src2));
17559   ins_cost(INSN_COST);
17560   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
17561   ins_encode %{
17562     __ eor(as_FloatRegister($dst$$reg), __ T8B,
17563             as_FloatRegister($src1$$reg),
17564             as_FloatRegister($src2$$reg));
17565   %}
17566   ins_pipe(vlogical64);
17567 %}
17568 
17569 instruct vxor16B(vecX dst, vecX src1, vecX src2)
17570 %{
17571   predicate(n->as_Vector()->length_in_bytes() == 16);
17572   match(Set dst (XorV src1 src2));
17573   ins_cost(INSN_COST);
17574   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
17575   ins_encode %{
17576     __ eor(as_FloatRegister($dst$$reg), __ T16B,
17577             as_FloatRegister($src1$$reg),
17578             as_FloatRegister($src2$$reg));
17579   %}
17580   ins_pipe(vlogical128);
17581 %}
17582 
17583 // ------------------------------ Shift ---------------------------------------
17584 
17585 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
17586   match(Set dst (LShiftCntV cnt));
17587   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
17588   ins_encode %{
17589     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17590   %}
17591   ins_pipe(vdup_reg_reg128);
17592 %}
17593 
17594 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
17595 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
17596   match(Set dst (RShiftCntV cnt));
17597   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
17598   ins_encode %{
17599     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17600     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
17601   %}
17602   ins_pipe(vdup_reg_reg128);
17603 %}
17604 
// Byte shifts.
// sshl covers both LShiftVB and RShiftVB: vshiftcntR negated the count, and
// sshl by a negative amount is an arithmetic right shift.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift: ushl with the negated count.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Immediate left shift. A count >= the 8-bit element width would be an
// illegal shl encoding; the result is all-zero, produced as eor(src, src).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift: counts >= 8 clamp to 7, which still
// replicates the sign bit across the element (matches Java >> semantics).
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift: counts >= 8 zero the result via eor.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17767 
// Short (16-bit element) shifts; same scheme as the byte shifts above, with
// the element width 16 in the immediate-overflow checks.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Immediate left shift; count >= 16 yields all-zero via eor(src, src).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift; count clamps to 15 (element width - 1).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift; count >= 16 zeroes the result via eor.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17930 
17931 instruct vsll2I(vecD dst, vecD src, vecX shift) %{
17932   predicate(n->as_Vector()->length() == 2);
17933   match(Set dst (LShiftVI src shift));
17934   match(Set dst (RShiftVI src shift));
17935   ins_cost(INSN_COST);
17936   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
17937   ins_encode %{
17938     __ sshl(as_FloatRegister($dst$$reg), __ T2S,
17939             as_FloatRegister($src$$reg),
17940             as_FloatRegister($shift$$reg));
17941   %}
17942   ins_pipe(vshift64);
17943 %}
17944 
17945 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
17946   predicate(n->as_Vector()->length() == 4);
17947   match(Set dst (LShiftVI src shift));
17948   match(Set dst (RShiftVI src shift));
17949   ins_cost(INSN_COST);
17950   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
17951   ins_encode %{
17952     __ sshl(as_FloatRegister($dst$$reg), __ T4S,
17953             as_FloatRegister($src$$reg),
17954             as_FloatRegister($shift$$reg));
17955   %}
17956   ins_pipe(vshift128);
17957 %}
17958 
17959 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
17960   predicate(n->as_Vector()->length() == 2);
17961   match(Set dst (URShiftVI src shift));
17962   ins_cost(INSN_COST);
17963   format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
17964   ins_encode %{
17965     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
17966             as_FloatRegister($src$$reg),
17967             as_FloatRegister($shift$$reg));
17968   %}
17969   ins_pipe(vshift64);
17970 %}
17971 
17972 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
17973   predicate(n->as_Vector()->length() == 4);
17974   match(Set dst (URShiftVI src shift));
17975   ins_cost(INSN_COST);
17976   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
17977   ins_encode %{
17978     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
17979             as_FloatRegister($src$$reg),
17980             as_FloatRegister($shift$$reg));
17981   %}
17982   ins_pipe(vshift128);
17983 %}
17984 
17985 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
17986   predicate(n->as_Vector()->length() == 2);
17987   match(Set dst (LShiftVI src shift));
17988   ins_cost(INSN_COST);
17989   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
17990   ins_encode %{
17991     __ shl(as_FloatRegister($dst$$reg), __ T2S,
17992            as_FloatRegister($src$$reg),
17993            (int)$shift$$constant);
17994   %}
17995   ins_pipe(vshift64_imm);
17996 %}
17997 
17998 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
17999   predicate(n->as_Vector()->length() == 4);
18000   match(Set dst (LShiftVI src shift));
18001   ins_cost(INSN_COST);
18002   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
18003   ins_encode %{
18004     __ shl(as_FloatRegister($dst$$reg), __ T4S,
18005            as_FloatRegister($src$$reg),
18006            (int)$shift$$constant);
18007   %}
18008   ins_pipe(vshift128_imm);
18009 %}
18010 
18011 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
18012   predicate(n->as_Vector()->length() == 2);
18013   match(Set dst (RShiftVI src shift));
18014   ins_cost(INSN_COST);
18015   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
18016   ins_encode %{
18017     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
18018             as_FloatRegister($src$$reg),
18019             (int)$shift$$constant);
18020   %}
18021   ins_pipe(vshift64_imm);
18022 %}
18023 
18024 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
18025   predicate(n->as_Vector()->length() == 4);
18026   match(Set dst (RShiftVI src shift));
18027   ins_cost(INSN_COST);
18028   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
18029   ins_encode %{
18030     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
18031             as_FloatRegister($src$$reg),
18032             (int)$shift$$constant);
18033   %}
18034   ins_pipe(vshift128_imm);
18035 %}
18036 
// Vector logical shift-right by immediate: 2 x 32-bit int lanes (vecD).
// Matches URShiftVI with a constant count and emits NEON USHR (zero bits
// shifted in) on the T2S arrangement.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18049 
// Vector logical shift-right by immediate: 4 x 32-bit int lanes (vecX).
// Full-width (T4S) variant of vsrl2I_imm; uses the 128-bit shift pipeline.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18062 
// Vector shift by register: 2 x 64-bit long lanes (vecX).  Deliberately
// matches BOTH LShiftVL and RShiftVL with a single SSHL: NEON SSHL shifts
// left for positive per-lane counts and arithmetically right for negative
// ones.  NOTE(review): this relies on the RShiftCntV rule (defined
// elsewhere in this file) negating the broadcast shift count before it
// reaches here — confirm that rule is still in place.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18076 
// Vector logical shift-right by register: 2 x 64-bit long lanes (vecX).
// Emits NEON USHL, which shifts right (zero-filling) when the per-lane
// count is negative.  NOTE(review): assumes the URShift count vector has
// been negated by the shift-count rule defined elsewhere — confirm.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18089 
// Vector shift-left by immediate: 2 x 64-bit long lanes (vecX).
// Matches LShiftVL with a constant count and emits NEON SHL on T2D.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18102 
// Vector arithmetic shift-right by immediate: 2 x 64-bit long lanes (vecX).
// Matches RShiftVL with a constant count and emits NEON SSHR on T2D.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18115 
// Vector logical shift-right by immediate: 2 x 64-bit long lanes (vecX).
// Matches URShiftVL with a constant count and emits NEON USHR on T2D.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18128 
18129 //----------PEEPHOLE RULES-----------------------------------------------------
18130 // These must follow all instruction definitions as they use the names
18131 // defined in the instructions definitions.
18132 //
18133 // peepmatch ( root_instr_name [preceding_instruction]* );
18134 //
18135 // peepconstraint %{
18136 // (instruction_number.operand_name relational_op instruction_number.operand_name
18137 //  [, ...] );
18138 // // instruction numbers are zero-based using left to right order in peepmatch
18139 //
18140 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18141 // // provide an instruction_number.operand_name for each operand that appears
18142 // // in the replacement instruction's match rule
18143 //
18144 // ---------VM FLAGS---------------------------------------------------------
18145 //
18146 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18147 //
18148 // Each peephole rule is given an identifying number starting with zero and
18149 // increasing by one in the order seen by the parser.  An individual peephole
18150 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18151 // on the command-line.
18152 //
18153 // ---------CURRENT LIMITATIONS----------------------------------------------
18154 //
18155 // Only match adjacent instructions in same basic block
18156 // Only equality constraints
18157 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18158 // Only one replacement instruction
18159 //
18160 // ---------EXAMPLE----------------------------------------------------------
18161 //
18162 // // pertinent parts of existing instructions in architecture description
18163 // instruct movI(iRegINoSp dst, iRegI src)
18164 // %{
18165 //   match(Set dst (CopyI src));
18166 // %}
18167 //
18168 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18169 // %{
18170 //   match(Set dst (AddI dst src));
18171 //   effect(KILL cr);
18172 // %}
18173 //
18174 // // Change (inc mov) to lea
18175 // peephole %{
//   // increment preceded by register-register move
18177 //   peepmatch ( incI_iReg movI );
18178 //   // require that the destination register of the increment
18179 //   // match the destination register of the move
18180 //   peepconstraint ( 0.dst == 1.dst );
18181 //   // construct a replacement instruction that sets
18182 //   // the destination to ( move's source register + one )
18183 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18184 // %}
18185 //
18186 
18187 // Implementation no longer uses movX instructions since
18188 // machine-independent system no longer uses CopyX nodes.
18189 //
18190 // peephole
18191 // %{
18192 //   peepmatch (incI_iReg movI);
18193 //   peepconstraint (0.dst == 1.dst);
18194 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18195 // %}
18196 
18197 // peephole
18198 // %{
18199 //   peepmatch (decI_iReg movI);
18200 //   peepconstraint (0.dst == 1.dst);
18201 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18202 // %}
18203 
18204 // peephole
18205 // %{
18206 //   peepmatch (addI_iReg_imm movI);
18207 //   peepconstraint (0.dst == 1.dst);
18208 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18209 // %}
18210 
18211 // peephole
18212 // %{
18213 //   peepmatch (incL_iReg movL);
18214 //   peepconstraint (0.dst == 1.dst);
18215 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18216 // %}
18217 
18218 // peephole
18219 // %{
18220 //   peepmatch (decL_iReg movL);
18221 //   peepconstraint (0.dst == 1.dst);
18222 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18223 // %}
18224 
18225 // peephole
18226 // %{
18227 //   peepmatch (addL_iReg_imm movL);
18228 //   peepconstraint (0.dst == 1.dst);
18229 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18230 // %}
18231 
18232 // peephole
18233 // %{
18234 //   peepmatch (addP_iReg_imm movP);
18235 //   peepconstraint (0.dst == 1.dst);
18236 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18237 // %}
18238 
18239 // // Change load of spilled value to only a spill
18240 // instruct storeI(memory mem, iRegI src)
18241 // %{
18242 //   match(Set mem (StoreI mem src));
18243 // %}
18244 //
18245 // instruct loadI(iRegINoSp dst, memory mem)
18246 // %{
18247 //   match(Set dst (LoadI mem));
18248 // %}
18249 //
18250 
18251 //----------SMARTSPILL RULES---------------------------------------------------
18252 // These must follow all instruction definitions as they use the names
18253 // defined in the instructions definitions.
18254 
18255 // Local Variables:
18256 // mode: c++
18257 // End: