1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// 64 bit general registers: each is defined as a real low half (Rn)
// plus a virtual high half (Rn_H) used only by the allocator (see
// note above about 32 bit halves).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined: they stay invisible to the
// register allocator so they can be used as scratch registers (see the
// note at the top of this section).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are SOE for the C convention but SOC for Java (we do not use
// Java callee-save registers; see the note above about de-optimisation).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have fixed roles and are No-Save for Java (not allocated).
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are all treated as save-on-call
// (even though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each 128 bit vector register Vn is described by four 32 bit slices:
  // Vn (lowest word), plus virtual slices Vn_H, Vn_J and Vn_K for the
  // second, third and fourth words (as_VMReg()->next(1..3)).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are SOC here for Java use even though the platform ABI
  // treats them as callee save (see note above).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16-v31 are SOC as per the platform spec.
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Pseudo flag register for the condition codes: encoding 32 lies past
// the last real register encoding (0-31 above) and VMRegImpl::Bad()
// marks it as having no backing VMReg.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// General registers, listed in allocation order -- highest priority
// first (see the note above): scratch volatiles, then argument
// registers, then non-volatiles, with the fixed-role registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

// Float/vector registers in allocation order: no-save v16-v31 first,
// then argument registers v0-v7, then non-volatiles v8-v15.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// Condition codes get a chunk of their own.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
// 32 bit registers R0-R30; only SP (R31) is excluded.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
 518 // Class for all non-special integer registers
// Non-special 32 bit registers, excluding the frame pointer (used when
// PreserveFramePointer is set -- see reg_class_dynamic below).
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// As above, but additionally allowing R29 (fp) to be allocated (used
// when PreserveFramePointer is not set).
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Selects the _no_fp variant when PreserveFramePointer is true,
// otherwise the _with_fp variant.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
// Non-special long registers, excluding the frame pointer (used when
// PreserveFramePointer is set -- see reg_class_dynamic below).
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// As above, but additionally allowing R29 (fp) to be allocated (used
// when PreserveFramePointer is not set).
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Selects the _no_fp variant when PreserveFramePointer is true,
// otherwise the _with_fp variant.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64 bit register classes (each lists the real low half and
// the virtual high half of one register).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12 == rmethod)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
 726 // Class for all pointer registers
// All pointer registers, including the fixed-role ones (r27-r31).
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Pointer registers minus the fixed-role ones (heapbase, thread, fp,
// lr, sp -- commented out below).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
 794 // Class for all float registers
// All float registers: one 32 bit slice per register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: low slice plus the _H slice.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers
//
// n.b. same two-slot (Vn, Vn_H) layout as double_reg: a 64-bit vector
// uses the bottom half of the SIMD register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// four 32-bit slots (Vn, Vn_H, Vn_J, Vn_K) cover the full 128-bit
// Q register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only two 32-bit slots (64 bits) are listed here,
// unlike vectorx_reg above -- confirm the `128 bit' wording is intended
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only two 32-bit slots (64 bits) are listed here,
// unlike vectorx_reg above -- confirm the `128 bit' wording is intended
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only two 32-bit slots (64 bits) are listed here,
// unlike vectorx_reg above -- confirm the `128 bit' wording is intended
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only two 32-bit slots (64 bits) are listed here,
// unlike vectorx_reg above -- confirm the `128 bit' wording is intended
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes
// (RFLAGS is the single pseudo register modelling the condition flags)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls rank twice as expensive as a register op
  // (see the cost model comment above)
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile memory references are the most expensive operations
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 does not use call trampoline stubs, so both queries
  // report zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
class HandlerImpl {

 public:

  // Emit the exception and deopt handler stubs into cbuf; the
  // definitions live elsewhere in this file's source block.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch to the generic
  // exception blob.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): the budget is 4 instruction words -- presumably
    // 1 word for the adr plus up to 3 for the far branch; confirm
    // against emit_deopt_handler
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
  // graph traversal helpers
  // (walk the Ctl/Mem Proj links between membar nodes; defined in the
  // source block below)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // helpers mapping between the leading and trailing membars of a
  // volatile put / CAS subgraph, possibly via a card mark membar
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1073 %}
1074 
1075 source %{
1076 
1077   // Optimizaton of volatile gets and puts
1078   // -------------------------------------
1079   //
1080   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1081   // use to implement volatile reads and writes. For a volatile read
1082   // we simply need
1083   //
1084   //   ldar<x>
1085   //
1086   // and for a volatile write we need
1087   //
1088   //   stlr<x>
1089   //
1090   // Alternatively, we can implement them by pairing a normal
1091   // load/store with a memory barrier. For a volatile read we need
1092   //
1093   //   ldr<x>
1094   //   dmb ishld
1095   //
1096   // for a volatile write
1097   //
1098   //   dmb ish
1099   //   str<x>
1100   //   dmb ish
1101   //
1102   // We can also use ldaxr and stlxr to implement compare and swap CAS
1103   // sequences. These are normally translated to an instruction
1104   // sequence like the following
1105   //
1106   //   dmb      ish
1107   // retry:
1108   //   ldxr<x>   rval raddr
1109   //   cmp       rval rold
1110   //   b.ne done
1111   //   stlxr<x>  rval, rnew, rold
1112   //   cbnz      rval retry
1113   // done:
1114   //   cset      r0, eq
1115   //   dmb ishld
1116   //
1117   // Note that the exclusive store is already using an stlxr
1118   // instruction. That is required to ensure visibility to other
1119   // threads of the exclusive write (assuming it succeeds) before that
1120   // of any subsequent writes.
1121   //
1122   // The following instruction sequence is an improvement on the above
1123   //
1124   // retry:
1125   //   ldaxr<x>  rval raddr
1126   //   cmp       rval rold
1127   //   b.ne done
1128   //   stlxr<x>  rval, rnew, rold
1129   //   cbnz      rval retry
1130   // done:
1131   //   cset      r0, eq
1132   //
1133   // We don't need the leading dmb ish since the stlxr guarantees
1134   // visibility of prior writes in the case that the swap is
1135   // successful. Crucially we don't have to worry about the case where
1136   // the swap is not successful since no valid program should be
1137   // relying on visibility of prior changes by the attempting thread
1138   // in the case where the CAS fails.
1139   //
1140   // Similarly, we don't need the trailing dmb ishld if we substitute
1141   // an ldaxr instruction since that will provide all the guarantees we
1142   // require regarding observation of changes made by other threads
1143   // before any change to the CAS address observed by the load.
1144   //
1145   // In order to generate the desired instruction sequence we need to
1146   // be able to identify specific 'signature' ideal graph node
1147   // sequences which i) occur as a translation of a volatile reads or
1148   // writes or CAS operations and ii) do not occur through any other
1149   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1151   // sequences to the desired machine code sequences. Selection of the
1152   // alternative rules can be implemented by predicates which identify
1153   // the relevant node sequences.
1154   //
1155   // The ideal graph generator translates a volatile read to the node
1156   // sequence
1157   //
1158   //   LoadX[mo_acquire]
1159   //   MemBarAcquire
1160   //
1161   // As a special case when using the compressed oops optimization we
1162   // may also see this variant
1163   //
1164   //   LoadN[mo_acquire]
1165   //   DecodeN
1166   //   MemBarAcquire
1167   //
1168   // A volatile write is translated to the node sequence
1169   //
1170   //   MemBarRelease
1171   //   StoreX[mo_release] {CardMark}-optional
1172   //   MemBarVolatile
1173   //
1174   // n.b. the above node patterns are generated with a strict
1175   // 'signature' configuration of input and output dependencies (see
1176   // the predicates below for exact details). The card mark may be as
1177   // simple as a few extra nodes or, in a few GC configurations, may
1178   // include more complex control flow between the leading and
1179   // trailing memory barriers. However, whatever the card mark
1180   // configuration these signatures are unique to translated volatile
1181   // reads/stores -- they will not appear as a result of any other
1182   // bytecode translation or inlining nor as a consequence of
1183   // optimizing transforms.
1184   //
1185   // We also want to catch inlined unsafe volatile gets and puts and
1186   // be able to implement them using either ldar<x>/stlr<x> or some
1187   // combination of ldr<x>/stlr<x> and dmb instructions.
1188   //
1189   // Inlined unsafe volatiles puts manifest as a minor variant of the
1190   // normal volatile put node sequence containing an extra cpuorder
1191   // membar
1192   //
1193   //   MemBarRelease
1194   //   MemBarCPUOrder
1195   //   StoreX[mo_release] {CardMark}-optional
1196   //   MemBarCPUOrder
1197   //   MemBarVolatile
1198   //
1199   // n.b. as an aside, a cpuorder membar is not itself subject to
1200   // matching and translation by adlc rules.  However, the rule
1201   // predicates need to detect its presence in order to correctly
1202   // select the desired adlc rules.
1203   //
1204   // Inlined unsafe volatile gets manifest as a slightly different
1205   // node sequence to a normal volatile get because of the
1206   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1209   // present
1210   //
1211   //   MemBarCPUOrder
1212   //        ||       \\
1213   //   MemBarCPUOrder LoadX[mo_acquire]
1214   //        ||            |
1215   //        ||       {DecodeN} optional
1216   //        ||       /
1217   //     MemBarAcquire
1218   //
1219   // In this case the acquire membar does not directly depend on the
1220   // load. However, we can be sure that the load is generated from an
1221   // inlined unsafe volatile get if we see it dependent on this unique
1222   // sequence of membar nodes. Similarly, given an acquire membar we
1223   // can know that it was added because of an inlined unsafe volatile
1224   // get if it is fed and feeds a cpuorder membar and if its feed
1225   // membar also feeds an acquiring load.
1226   //
1227   // Finally an inlined (Unsafe) CAS operation is translated to the
1228   // following ideal graph
1229   //
1230   //   MemBarRelease
1231   //   MemBarCPUOrder
1232   //   CompareAndSwapX {CardMark}-optional
1233   //   MemBarCPUOrder
1234   //   MemBarAcquire
1235   //
1236   // So, where we can identify these volatile read and write
1237   // signatures we can choose to plant either of the above two code
1238   // sequences. For a volatile read we can simply plant a normal
1239   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1240   // also choose to inhibit translation of the MemBarAcquire and
1241   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1242   //
1243   // When we recognise a volatile store signature we can choose to
1244   // plant at a dmb ish as a translation for the MemBarRelease, a
1245   // normal str<x> and then a dmb ish for the MemBarVolatile.
1246   // Alternatively, we can inhibit translation of the MemBarRelease
1247   // and MemBarVolatile and instead plant a simple stlr<x>
1248   // instruction.
1249   //
1250   // when we recognise a CAS signature we can choose to plant a dmb
1251   // ish as a translation for the MemBarRelease, the conventional
1252   // macro-instruction sequence for the CompareAndSwap node (which
1253   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1254   // Alternatively, we can elide generation of the dmb instructions
1255   // and plant the alternative CompareAndSwap macro-instruction
1256   // sequence (which uses ldaxr<x>).
1257   //
1258   // Of course, the above only applies when we see these signature
1259   // configurations. We still want to plant dmb instructions in any
1260   // other cases where we may see a MemBarAcquire, MemBarRelease or
1261   // MemBarVolatile. For example, at the end of a constructor which
1262   // writes final/volatile fields we will see a MemBarRelease
1263   // instruction and this needs a 'dmb ish' lest we risk the
1264   // constructed object being visible without making the
1265   // final/volatile field writes visible.
1266   //
1267   // n.b. the translation rules below which rely on detection of the
1268   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1269   // If we see anything other than the signature configurations we
1270   // always just translate the loads and stores to ldr<x> and str<x>
1271   // and translate acquire, release and volatile membars to the
1272   // relevant dmb instructions.
1273   //
1274 
1275   // graph traversal helpers used for volatile put/get and CAS
1276   // optimization
1277 
1278   // 1) general purpose helpers
1279 
1280   // if node n is linked to a parent MemBarNode by an intervening
1281   // Control and Memory ProjNode return the MemBarNode otherwise return
1282   // NULL.
1283   //
1284   // n may only be a Load or a MemBar.
1285 
1286   MemBarNode *parent_membar(const Node *n)
1287   {
1288     Node *ctl = NULL;
1289     Node *mem = NULL;
1290     Node *membar = NULL;
1291 
1292     if (n->is_Load()) {
1293       ctl = n->lookup(LoadNode::Control);
1294       mem = n->lookup(LoadNode::Memory);
1295     } else if (n->is_MemBar()) {
1296       ctl = n->lookup(TypeFunc::Control);
1297       mem = n->lookup(TypeFunc::Memory);
1298     } else {
1299         return NULL;
1300     }
1301 
1302     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1303       return NULL;
1304     }
1305 
1306     membar = ctl->lookup(0);
1307 
1308     if (!membar || !membar->is_MemBar()) {
1309       return NULL;
1310     }
1311 
1312     if (mem->lookup(0) != membar) {
1313       return NULL;
1314     }
1315 
1316     return membar->as_MemBar();
1317   }
1318 
1319   // if n is linked to a child MemBarNode by intervening Control and
1320   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1321 
1322   MemBarNode *child_membar(const MemBarNode *n)
1323   {
1324     ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control);
1325     ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory);
1326 
1327     // MemBar needs to have both a Ctl and Mem projection
1328     if (! ctl || ! mem)
1329       return NULL;
1330 
1331     MemBarNode *child = NULL;
1332     Node *x;
1333 
1334     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1335       x = ctl->fast_out(i);
1336       // if we see a membar we keep hold of it. we may also see a new
1337       // arena copy of the original but it will appear later
1338       if (x->is_MemBar()) {
1339           child = x->as_MemBar();
1340           break;
1341       }
1342     }
1343 
1344     if (child == NULL) {
1345       return NULL;
1346     }
1347 
1348     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1349       x = mem->fast_out(i);
1350       // if we see a membar we keep hold of it. we may also see a new
1351       // arena copy of the original but it will appear later
1352       if (x == child) {
1353         return child;
1354       }
1355     }
1356     return NULL;
1357   }
1358 
1359   // helper predicate use to filter candidates for a leading memory
1360   // barrier
1361   //
1362   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1363   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1364 
1365   bool leading_membar(const MemBarNode *barrier)
1366   {
1367     int opcode = barrier->Opcode();
1368     // if this is a release membar we are ok
1369     if (opcode == Op_MemBarRelease) {
1370       return true;
1371     }
1372     // if its a cpuorder membar . . .
1373     if (opcode != Op_MemBarCPUOrder) {
1374       return false;
1375     }
1376     // then the parent has to be a release membar
1377     MemBarNode *parent = parent_membar(barrier);
1378     if (!parent) {
1379       return false;
1380     }
1381     opcode = parent->Opcode();
1382     return opcode == Op_MemBarRelease;
1383   }
1384 
1385   // 2) card mark detection helper
1386 
1387   // helper predicate which can be used to detect a volatile membar
1388   // introduced as part of a conditional card mark sequence either by
1389   // G1 or by CMS when UseCondCardMark is true.
1390   //
1391   // membar can be definitively determined to be part of a card mark
1392   // sequence if and only if all the following hold
1393   //
1394   // i) it is a MemBarVolatile
1395   //
1396   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1397   // true
1398   //
1399   // iii) the node's Mem projection feeds a StoreCM node.
1400 
1401   bool is_card_mark_membar(const MemBarNode *barrier)
1402   {
1403     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1404       return false;
1405     }
1406 
1407     if (barrier->Opcode() != Op_MemBarVolatile) {
1408       return false;
1409     }
1410 
1411     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1412 
1413     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1414       Node *y = mem->fast_out(i);
1415       if (y->Opcode() == Op_StoreCM) {
1416         return true;
1417       }
1418     }
1419 
1420     return false;
1421   }
1422 
1423 
1424   // 3) helper predicates to traverse volatile put or CAS graphs which
1425   // may contain GC barrier subgraphs
1426 
1427   // Preamble
1428   // --------
1429   //
1430   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1432   // leading MemBarRelease and a trailing MemBarVolatile as follows
1433   //
1434   //   MemBarRelease
1435   //  {      ||      } -- optional
1436   //  {MemBarCPUOrder}
1437   //         ||     \\
1438   //         ||     StoreX[mo_release]
1439   //         | \     /
1440   //         | MergeMem
1441   //         | /
1442   //  {MemBarCPUOrder} -- optional
1443   //  {      ||      }
1444   //   MemBarVolatile
1445   //
1446   // where
1447   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1448   //  | \ and / indicate further routing of the Ctl and Mem feeds
1449   //
1450   // this is the graph we see for non-object stores. however, for a
1451   // volatile Object store (StoreN/P) we may see other nodes below the
1452   // leading membar because of the need for a GC pre- or post-write
1453   // barrier.
1454   //
  // with most GC configurations we will see this simple variant which
1456   // includes a post-write barrier card mark.
1457   //
1458   //   MemBarRelease______________________________
1459   //         ||    \\               Ctl \        \\
1460   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1461   //         | \     /                       . . .  /
1462   //         | MergeMem
1463   //         | /
1464   //         ||      /
1465   //  {MemBarCPUOrder} -- optional
1466   //  {      ||      }
1467   //   MemBarVolatile
1468   //
1469   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1470   // the object address to an int used to compute the card offset) and
1471   // Ctl+Mem to a StoreB node (which does the actual card mark).
1472   //
1473   // n.b. a StoreCM node will only appear in this configuration when
1474   // using CMS or G1. StoreCM differs from a normal card mark write (StoreB)
1475   // because it implies a requirement to order visibility of the card
1476   // mark (StoreCM) relative to the object put (StoreP/N) using a
1477   // StoreStore memory barrier (arguably this ought to be represented
1478   // explicitly in the ideal graph but that is not how it works). This
1479   // ordering is required for both non-volatile and volatile
1480   // puts. Normally that means we need to translate a StoreCM using
1481   // the sequence
1482   //
1483   //   dmb ishst
1484   //   strb
1485   //
1486   // However, when using G1 or CMS with conditional card marking (as
1487   // we shall see) we don't need to insert the dmb when translating
1488   // StoreCM because there is already an intervening StoreLoad barrier
1489   // between it and the StoreP/N.
1490   //
1491   // It is also possible to perform the card mark conditionally on it
1492   // currently being unmarked in which case the volatile put graph
1493   // will look slightly different
1494   //
1495   //   MemBarRelease____________________________________________
1496   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1497   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1498   //         | \     /                              \            |
1499   //         | MergeMem                            . . .      StoreB
1500   //         | /                                                /
1501   //         ||     /
1502   //   MemBarVolatile
1503   //
1504   // It is worth noting at this stage that both the above
1505   // configurations can be uniquely identified by checking that the
1506   // memory flow includes the following subgraph:
1507   //
1508   //   MemBarRelease
1509   //  {MemBarCPUOrder}
1510   //          |  \      . . .
1511   //          |  StoreX[mo_release]  . . .
1512   //          |   /
1513   //         MergeMem
1514   //          |
1515   //  {MemBarCPUOrder}
1516   //   MemBarVolatile
1517   //
1518   // This is referred to as a *normal* subgraph. It can easily be
1519   // detected starting from any candidate MemBarRelease,
1520   // StoreX[mo_release] or MemBarVolatile.
1521   //
1522   // A simple variation on this normal case occurs for an unsafe CAS
1523   // operation. The basic graph for a non-object CAS is
1524   //
1525   //   MemBarRelease
1526   //         ||
1527   //   MemBarCPUOrder
1528   //         ||     \\   . . .
1529   //         ||     CompareAndSwapX
1530   //         ||       |
1531   //         ||     SCMemProj
1532   //         | \     /
1533   //         | MergeMem
1534   //         | /
1535   //   MemBarCPUOrder
1536   //         ||
1537   //   MemBarAcquire
1538   //
1539   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1542   // tail of the graph is a pair comprising a MemBarCPUOrder +
1543   // MemBarAcquire.
1544   //
1545   // So, in the case of a CAS the normal graph has the variant form
1546   //
1547   //   MemBarRelease
1548   //   MemBarCPUOrder
1549   //          |   \      . . .
1550   //          |  CompareAndSwapX  . . .
1551   //          |    |
1552   //          |   SCMemProj
1553   //          |   /  . . .
1554   //         MergeMem
1555   //          |
1556   //   MemBarCPUOrder
1557   //   MemBarAcquire
1558   //
1559   // This graph can also easily be detected starting from any
1560   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1561   //
1562   // the code below uses two helper predicates, leading_to_normal and
1563   // normal_to_leading to identify these normal graphs, one validating
1564   // the layout starting from the top membar and searching down and
1565   // the other validating the layout starting from the lower membar
1566   // and searching up.
1567   //
1568   // There are two special case GC configurations when a normal graph
1569   // may not be generated: when using G1 (which always employs a
1570   // conditional card mark); and when using CMS with conditional card
1571   // marking configured. These GCs are both concurrent rather than
1572   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1573   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1575   // object put and the corresponding conditional card mark. CMS
1576   // employs a post-write GC barrier while G1 employs both a pre- and
1577   // post-write GC barrier. Of course the extra nodes may be absent --
1578   // they are only inserted for object puts/swaps. This significantly
1579   // complicates the task of identifying whether a MemBarRelease,
1580   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1581   // when using these GC configurations (see below). It adds similar
1582   // complexity to the task of identifying whether a MemBarRelease,
1583   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1584   //
1585   // In both cases the post-write subtree includes an auxiliary
1586   // MemBarVolatile (StoreLoad barrier) separating the object put/swap
1587   // and the read of the corresponding card. This poses two additional
1588   // problems.
1589   //
1590   // Firstly, a card mark MemBarVolatile needs to be distinguished
1591   // from a normal trailing MemBarVolatile. Resolving this first
1592   // problem is straightforward: a card mark MemBarVolatile always
1593   // projects a Mem feed to a StoreCM node and that is a unique marker
1594   //
1595   //      MemBarVolatile (card mark)
1596   //       C |    \     . . .
1597   //         |   StoreCM   . . .
1598   //       . . .
1599   //
1600   // The second problem is how the code generator is to translate the
1601   // card mark barrier? It always needs to be translated to a "dmb
1602   // ish" instruction whether or not it occurs as part of a volatile
1603   // put. A StoreLoad barrier is needed after the object put to ensure
1604   // i) visibility to GC threads of the object put and ii) visibility
1605   // to the mutator thread of any card clearing write by a GC
1606   // thread. Clearly a normal store (str) will not guarantee this
1607   // ordering but neither will a releasing store (stlr). The latter
1608   // guarantees that the object put is visible but does not guarantee
1609   // that writes by other threads have also been observed.
1610   //
1611   // So, returning to the task of translating the object put and the
1612   // leading/trailing membar nodes: what do the non-normal node graph
1613   // look like for these 2 special cases? and how can we determine the
1614   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1615   // in both normal and non-normal cases?
1616   //
1617   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1619   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1620   // intervening StoreLoad barrier (MemBarVolatile).
1621   //
1622   // So, with CMS we may see a node graph for a volatile object store
1623   // which looks like this
1624   //
1625   //   MemBarRelease
1626   //  {MemBarCPUOrder}_(leading)_________________
1627   //     C |    M \       \\                   C \
1628   //       |       \    StoreN/P[mo_release]  CastP2X
1629   //       |    Bot \    /
1630   //       |       MergeMem
1631   //       |         /
1632   //      MemBarVolatile (card mark)
1633   //     C |  ||    M |
1634   //       | LoadB    |
1635   //       |   |      |
1636   //       | Cmp      |\
1637   //       | /        | \
1638   //       If         |  \
1639   //       | \        |   \
1640   // IfFalse  IfTrue  |    \
1641   //       \     / \  |     \
1642   //        \   / StoreCM    |
1643   //         \ /      |      |
1644   //        Region   . . .   |
1645   //          | \           /
1646   //          |  . . .  \  / Bot
1647   //          |       MergeMem
1648   //          |          |
1649   //       {MemBarCPUOrder}
1650   //        MemBarVolatile (trailing)
1651   //
1652   // The first MergeMem merges the AliasIdxBot Mem slice from the
1653   // leading membar and the oopptr Mem slice from the Store into the
1654   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1655   // Mem slice from the card mark membar and the AliasIdxRaw slice
1656   // from the StoreCM into the trailing membar (n.b. the latter
1657   // proceeds via a Phi associated with the If region).
1658   //
1659   // The graph for a CAS varies slightly, the difference being
1660   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1661   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1662   // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional).
1663   //
1664   //   MemBarRelease
1665   //   MemBarCPUOrder_(leading)_______________
1666   //     C |    M \       \\                C \
1667   //       |       \    CompareAndSwapN/P  CastP2X
1668   //       |        \      |
1669   //       |         \   SCMemProj
1670   //       |      Bot \   /
1671   //       |        MergeMem
1672   //       |         /
1673   //      MemBarVolatile (card mark)
1674   //     C |  ||    M |
1675   //       | LoadB    |
1676   //       |   |      |
1677   //       | Cmp      |\
1678   //       | /        | \
1679   //       If         |  \
1680   //       | \        |   \
1681   // IfFalse  IfTrue  |    \
1682   //       \     / \  |     \
1683   //        \   / StoreCM    |
1684   //         \ /      |      |
1685   //        Region   . . .   |
1686   //          | \           /
1687   //          |  . . .  \  / Bot
1688   //          |       MergeMem
1689   //          |          |
1690   //        MemBarCPUOrder
1691   //        MemBarVolatile (trailing)
1692   //
1693   //
1694   // G1 is quite a lot more complicated. The nodes inserted on behalf
1695   // of G1 may comprise: a pre-write graph which adds the old value to
1696   // the SATB queue; the releasing store itself; and, finally, a
1697   // post-write graph which performs a card mark.
1698   //
1699   // The pre-write graph may be omitted, but only when the put is
1700   // writing to a newly allocated (young gen) object and then only if
1701   // there is a direct memory chain to the Initialize node for the
1702   // object allocation. This will not happen for a volatile put since
1703   // any memory chain passes through the leading membar.
1704   //
1705   // The pre-write graph includes a series of 3 If tests. The outermost
1706   // If tests whether SATB is enabled (no else case). The next If tests
1707   // whether the old value is non-NULL (no else case). The third tests
1708   // whether the SATB queue index is > 0, if so updating the queue. The
1709   // else case for this third If calls out to the runtime to allocate a
1710   // new queue buffer.
1711   //
1712   // So with G1 the pre-write and releasing store subgraph looks like
1713   // this (the nested Ifs are omitted).
1714   //
1715   //  MemBarRelease
1716   // {MemBarCPUOrder}_(leading)___________
1717   //     C |  ||  M \   M \    M \  M \ . . .
1718   //       | LoadB   \  LoadL  LoadN   \
1719   //       | /        \                 \
1720   //       If         |\                 \
1721   //       | \        | \                 \
1722   //  IfFalse  IfTrue |  \                 \
1723   //       |     |    |   \                 |
1724   //       |     If   |   /\                |
1725   //       |     |          \               |
1726   //       |                 \              |
1727   //       |    . . .         \             |
1728   //       | /       | /       |            |
1729   //      Region  Phi[M]       |            |
1730   //       | \       |         |            |
1731   //       |  \_____ | ___     |            |
1732   //     C | C \     |   C \ M |            |
1733   //       | CastP2X | StoreN/P[mo_release] |
1734   //       |         |         |            |
1735   //     C |       M |       M |          M |
1736   //        \        |         |           /
1737   //                  . . .
1738   //          (post write subtree elided)
1739   //                    . . .
1740   //             C \         M /
1741   //                \         /
1742   //             {MemBarCPUOrder}
1743   //              MemBarVolatile (trailing)
1744   //
1745   // n.b. the LoadB in this subgraph is not the card read -- it's a
1746   // read of the SATB queue active flag.
1747   //
1748   // The G1 post-write subtree is also optional, this time when the
1749   // new value being written is either null or can be identified as a
1750   // newly allocated (young gen) object with no intervening control
1751   // flow. The latter cannot happen but the former may, in which case
1752   // the card mark membar is omitted and the memory feeds from the
1753   // leading membar and the StoreN/P are merged direct into the
1754   // trailing membar as per the normal subgraph. So, the only special
1755   // case which arises is when the post-write subgraph is generated.
1756   //
1757   // The kernel of the post-write G1 subgraph is the card mark itself
1758   // which includes a card mark memory barrier (MemBarVolatile), a
1759   // card test (LoadB), and a conditional update (If feeding a
1760   // StoreCM). These nodes are surrounded by a series of nested Ifs
1761   // which try to avoid doing the card mark. The top level If skips if
1762   // the object reference does not cross regions (i.e. it tests if
1763   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1764   // need not be recorded. The next If, which skips on a NULL value,
1765   // may be absent (it is not generated if the type of value is >=
1766   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1767   // checking if card_val != young).  n.b. although this test requires
1768   // a pre-read of the card it can safely be done before the StoreLoad
1769   // barrier. However that does not bypass the need to reread the card
1770   // after the barrier. A final, 4th If tests if the card is already
1771   // marked.
1772   //
1773   //                (pre-write subtree elided)
1774   //        . . .                  . . .    . . .  . . .
1775   //        C |                    M |     M |    M |
1776   //       Region                  Phi[M] StoreN    |
1777   //          |                     / \      |      |
1778   //         / \_______            /   \     |      |
1779   //      C / C \      . . .            \    |      |
1780   //       If   CastP2X . . .            |   |      |
1781   //       / \                           |   |      |
1782   //      /   \                          |   |      |
1783   // IfFalse IfTrue                      |   |      |
1784   //   |       |                         |   |     /|
1785   //   |       If                        |   |    / |
1786   //   |      / \                        |   |   /  |
1787   //   |     /   \                        \  |  /   |
1788   //   | IfFalse IfTrue                   MergeMem  |
1789   //   |  . . .    / \                       /      |
1790   //   |          /   \                     /       |
1791   //   |     IfFalse IfTrue                /        |
1792   //   |      . . .    |                  /         |
1793   //   |               If                /          |
1794   //   |               / \              /           |
1795   //   |              /   \            /            |
1796   //   |         IfFalse IfTrue       /             |
1797   //   |           . . .   |         /              |
1798   //   |                    \       /               |
1799   //   |                     \     /                |
1800   //   |             MemBarVolatile__(card mark)    |
1801   //   |                ||   C |  M \  M \          |
1802   //   |               LoadB   If    |    |         |
1803   //   |                      / \    |    |         |
1804   //   |                     . . .   |    |         |
1805   //   |                          \  |    |        /
1806   //   |                        StoreCM   |       /
1807   //   |                          . . .   |      /
1808   //   |                        _________/      /
1809   //   |                       /  _____________/
1810   //   |   . . .       . . .  |  /            /
1811   //   |    |                 | /   _________/
1812   //   |    |               Phi[M] /        /
1813   //   |    |                 |   /        /
1814   //   |    |                 |  /        /
1815   //   |  Region  . . .     Phi[M]  _____/
1816   //   |    /                 |    /
1817   //   |                      |   /
1818   //   | . . .   . . .        |  /
1819   //   | /                    | /
1820   // Region           |  |  Phi[M]
1821   //   |              |  |  / Bot
1822   //    \            MergeMem
1823   //     \            /
1824   //    {MemBarCPUOrder}
1825   //     MemBarVolatile
1826   //
1827   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1828   // from the leading membar and the oopptr Mem slice from the Store
1829   // into the card mark membar i.e. the memory flow to the card mark
1830   // membar still looks like a normal graph.
1831   //
1832   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1833   // Mem slices (from the StoreCM and other card mark queue stores).
1834   // However in this case the AliasIdxBot Mem slice does not come
1835   // direct from the card mark membar. It is merged through a series
1836   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1837   // from the leading membar with the Mem feed from the card mark
1838   // membar. Each Phi corresponds to one of the Ifs which may skip
1839   // around the card mark membar. So when the If implementing the NULL
1840   // value check has been elided the total number of Phis is 2
1841   // otherwise it is 3.
1842   //
1843   // The CAS graph when using G1GC also includes a pre-write subgraph
1844   // and an optional post-write subgraph. The same variations are
1845   // introduced as for CMS with conditional card marking i.e. the
1846   // StoreP/N is swapped for a CompareAndSwapP/N with a following
1847   // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder +
1848   // MemBarAcquire pair. There may be an extra If test introduced in
1849   // the CAS case, when the boolean result of the CAS is tested by the
1850   // caller. In that case an extra Region and AliasIdxBot Phi may be
1851   // introduced before the MergeMem
1852   //
1853   // So, the upshot is that in all cases the subgraph will include a
1854   // *normal* memory subgraph between the leading membar and its child
1855   // membar: either a normal volatile put graph including a releasing
1856   // StoreX and terminating with a trailing volatile membar or card
1857   // mark volatile membar; or a normal CAS graph including a
1858   // CompareAndSwapX + SCMemProj pair and terminating with a card mark
1859   // volatile membar or a trailing cpu order and acquire membar
1860   // pair. If the child membar is not a (volatile) card mark membar
1861   // then it marks the end of the volatile put or CAS subgraph. If the
1862   // child is a card mark membar then the normal subgraph will form
1863   // part of a larger volatile put or CAS subgraph if and only if the
1864   // child feeds an AliasIdxBot Mem feed to a trailing barrier via a
1865   // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4
1866   // Phi nodes merging the leading barrier memory flow (for G1).
1867   //
1868   // The predicates controlling generation of instructions for store
1869   // and barrier nodes employ a few simple helper functions (described
1870   // below) which identify the presence or absence of all these
1871   // subgraph configurations and provide a means of traversing from
1872   // one node in the subgraph to another.
1873 
1874   // is_CAS(int opcode)
1875   //
1876   // return true if opcode is one of the possible CompareAndSwapX
1877   // values otherwise false.
1878 
1879   bool is_CAS(int opcode)
1880   {
1881     switch(opcode) {
1882       // We handle these
1883     case Op_CompareAndSwapI:
1884     case Op_CompareAndSwapL:
1885     case Op_CompareAndSwapP:
1886     case Op_CompareAndSwapN:
1887  // case Op_CompareAndSwapB:
1888  // case Op_CompareAndSwapS:
1889       return true;
1890       // These are TBD
1891     case Op_WeakCompareAndSwapB:
1892     case Op_WeakCompareAndSwapS:
1893     case Op_WeakCompareAndSwapI:
1894     case Op_WeakCompareAndSwapL:
1895     case Op_WeakCompareAndSwapP:
1896     case Op_WeakCompareAndSwapN:
1897     case Op_CompareAndExchangeB:
1898     case Op_CompareAndExchangeS:
1899     case Op_CompareAndExchangeI:
1900     case Op_CompareAndExchangeL:
1901     case Op_CompareAndExchangeP:
1902     case Op_CompareAndExchangeN:
1903       return false;
1904     default:
1905       return false;
1906     }
1907   }
1908 
1909   // helper to determine the maximum number of Phi nodes we may need to
1910   // traverse when searching from a card mark membar for the merge mem
1911   // feeding a trailing membar or vice versa
1912 
1913   int max_phis()
1914   {
1915     if (UseG1GC) {
1916       return 4;
1917     } else if (UseConcMarkSweepGC && UseCondCardMark) {
1918       return 1;
1919     } else {
1920       return 0;
1921     }
1922   }
1923 
1924   // leading_to_normal
1925   //
1926   // graph traversal helper which detects the normal case Mem feed
1927   // from a release membar (or, optionally, its cpuorder child) to a
1928   // dependent volatile or acquire membar i.e. it ensures that one of
1929   // the following 3 Mem flow subgraphs is present.
1930   //
1931   //   MemBarRelease
1932   //  {MemBarCPUOrder} {leading}
1933   //          |  \      . . .
1934   //          |  StoreN/P[mo_release]  . . .
1935   //          |   /
1936   //         MergeMem
1937   //          |
1938   //  {MemBarCPUOrder}
1939   //   MemBarVolatile {trailing or card mark}
1940   //
1941   //   MemBarRelease
1942   //   MemBarCPUOrder {leading}
1943   //          |  \      . . .
1944   //          |  CompareAndSwapX  . . .
1945   //          |   /
1946   //         MergeMem
1947   //          |
1948   //   MemBarVolatile {card mark}
1949   //
1950   //   MemBarRelease
1951   //   MemBarCPUOrder {leading}
1952   //          |  \      . . .
1953   //          |  CompareAndSwapX  . . .
1954   //          |   /
1955   //         MergeMem
1956   //          |
1957   //   MemBarCPUOrder
1958   //   MemBarAcquire {trailing}
1959   //
1960   // if the correct configuration is present returns the trailing
1961   // or cardmark membar otherwise NULL.
1962   //
1963   // the input membar is expected to be either a cpuorder membar or a
1964   // release membar. in the latter case it should not have a cpu membar
1965   // child.
1966   //
1967   // the returned value may be a card mark or trailing membar
1968   //
1969 
  MemBarNode *leading_to_normal(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow: the leading membar must project a Memory edge
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;

    // scan the users of the memory projection looking for exactly one
    // MergeMem plus exactly one releasing store or CAS node
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        // two merge mems is one too many
        if (mm != NULL) {
          return NULL;
        }
        mm = x->as_MergeMem();
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have a merge
    if (!mm) {
      return NULL;
    }

    // identify the node which feeds Mem to the MergeMem: for a CAS
    // that is its SCMemProj, for a store it is the store itself
    Node *feed = NULL;
    if (cas) {
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->Opcode() == Op_SCMemProj) {
          feed = x;
          break;
        }
      }
      if (feed == NULL) {
        return NULL;
      }
    } else {
      feed = st;
    }
    // ensure the feed node feeds the existing mergemem;
    // n.b. if no out matches mm, x is left as the last out examined
    for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
      x = feed->fast_out(i);
      if (x == mm) {
        break;
      }
    }
    if (x != mm) {
      return NULL;
    }

    MemBarNode *mbar = NULL;
    // ensure the merge feeds to the expected type of membar
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        if (x->Opcode() == Op_MemBarCPUOrder) {
          // with a store any cpu order membar should precede a
          // trailing volatile membar. with a cas it should precede a
          // trailing acquire membar. in either case try to skip to
          // that next membar
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL) {
            // skip to this new membar to do the check
            x = y;
          }
          
        }
        if (x->Opcode() == Op_MemBarVolatile) {
          mbar = x->as_MemBar();
          // for a volatile store this can be either a trailing membar
          // or a card mark membar. for a cas it must be a card mark
          // membar
          guarantee(cas == NULL || is_card_mark_membar(mbar),
                    "in CAS graph volatile membar must be a card mark");
        } else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) {
          mbar = x->as_MemBar();
        }
        break;
      }
    }

    // NULL if no suitably-placed trailing/card-mark membar was found
    return mbar;
  }
2081 
2082   // normal_to_leading
2083   //
2084   // graph traversal helper which detects the normal case Mem feed
2085   // from either a card mark or a trailing membar to a preceding
2086   // release membar (optionally its cpuorder child) i.e. it ensures
2087   // that one of the following 3 Mem flow subgraphs is present.
2088   //
2089   //   MemBarRelease
2090   //  {MemBarCPUOrder} {leading}
2091   //          |  \      . . .
2092   //          |  StoreN/P[mo_release]  . . .
2093   //          |   /
2094   //         MergeMem
2095   //          |
2096   //  {MemBarCPUOrder}
2097   //   MemBarVolatile {trailing or card mark}
2098   //
2099   //   MemBarRelease
2100   //   MemBarCPUOrder {leading}
2101   //          |  \      . . .
2102   //          |  CompareAndSwapX  . . .
2103   //          |   /
2104   //         MergeMem
2105   //          |
2106   //   MemBarVolatile {card mark}
2107   //
2108   //   MemBarRelease
2109   //   MemBarCPUOrder {leading}
2110   //          |  \      . . .
2111   //          |  CompareAndSwapX  . . .
2112   //          |   /
2113   //         MergeMem
2114   //          |
2115   //   MemBarCPUOrder
2116   //   MemBarAcquire {trailing}
2117   //
2118   // this predicate checks for the same flow as the previous predicate
2119   // but starting from the bottom rather than the top.
2120   //
2121   // if the configuration is present returns the cpuorder member for
2122   // preference or when absent the release membar otherwise NULL.
2123   //
2124   // n.b. the input membar is expected to be a MemBarVolatile but
2125   // need not be a card mark membar.
2126 
  MemBarNode *normal_to_leading(const MemBarNode *barrier)
  {
    // input must be a volatile membar
    assert((barrier->Opcode() == Op_MemBarVolatile ||
            barrier->Opcode() == Op_MemBarAcquire),
           "expecting a volatile or an acquire membar");
    bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire;

    // if we have an intervening cpu order membar then start the
    // search from it
    
    Node *x = parent_membar(barrier);

    if (x == NULL) {
      // stick with the original barrier
      x = (Node *)barrier;
    } else if (x->Opcode() != Op_MemBarCPUOrder) {
      // any other barrier means this is not the graph we want
      return NULL;
    }

    // the Mem feed to the membar should be a merge
    x = x ->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // the merge should get its Bottom mem feed from the leading membar
    x = mm->in(Compile::AliasIdxBot);

    // ensure this is a non control projection
    if (!x->is_Proj() || x->is_CFG()) {
      return NULL;
    }
    // if it is fed by a membar that's the one we want
    x = x->in(0);

    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *leading = x->as_MemBar();
    // reject invalid candidates
    if (!leading_membar(leading)) {
      return NULL;
    }

    // ok, we have a leading membar, now for the sanity clauses

    // the leading membar must feed Mem to a releasing store or CAS
    // (exactly one of them -- more than one means a non-normal graph)
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
    StoreNode *st = NULL;
    LoadStoreNode *cas = NULL;
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two stores or CASes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // we cannot have both a store and a cas
    if (st == NULL && cas == NULL) {
      // we have neither -- this is not a normal graph
      return NULL;
    }
    if (st == NULL) {
      // if we started from a volatile membar and found a CAS then the
      // original membar ought to be for a card mark
      guarantee((barrier_is_acquire || is_card_mark_membar(barrier)),
                "unexpected volatile barrier (i.e. not card mark) in CAS graph");
      // check that the CAS feeds the merge we used to get here via an
      // intermediary SCMemProj
      Node *scmemproj = NULL;
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->Opcode() == Op_SCMemProj) {
          scmemproj = x;
          break;
        }
      }
      if (scmemproj == NULL) {
        return NULL;
      }
      for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) {
        x = scmemproj->fast_out(i);
        if (x == mm) {
          return leading;
        }
      }
    } else {
      // we should not have found a store if we started from an acquire
      guarantee(!barrier_is_acquire,
                "unexpected trailing acquire barrier in volatile store graph");

      // the store should feed the merge we used to get here
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          return leading;
        }
      }
    }

    // the store/CAS does not feed back into the merge -- wrong graph
    return NULL;
  }
2241 
2242   // card_mark_to_trailing
2243   //
2244   // graph traversal helper which detects extra, non-normal Mem feed
2245   // from a card mark volatile membar to a trailing membar i.e. it
2246   // ensures that one of the following three GC post-write Mem flow
2247   // subgraphs is present.
2248   //
2249   // 1)
2250   //     . . .
2251   //       |
2252   //   MemBarVolatile (card mark)
2253   //      |          |
2254   //      |        StoreCM
2255   //      |          |
2256   //      |        . . .
2257   //  Bot |  /
2258   //   MergeMem
2259   //      |
2260   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2261   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2262   //                                 
2263   //
2264   // 2)
2265   //   MemBarRelease/CPUOrder (leading)
2266   //    |
2267   //    |
2268   //    |\       . . .
2269   //    | \        |
2270   //    |  \  MemBarVolatile (card mark)
2271   //    |   \   |     |
2272   //     \   \  |   StoreCM    . . .
2273   //      \   \ |
2274   //       \  Phi
2275   //        \ /
2276   //        Phi  . . .
2277   //     Bot |   /
2278   //       MergeMem
2279   //         |
2280   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2281   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2282   //
2283   // 3)
2284   //   MemBarRelease/CPUOrder (leading)
2285   //    |
2286   //    |\
2287   //    | \
2288   //    |  \      . . .
2289   //    |   \       |
2290   //    |\   \  MemBarVolatile (card mark)
2291   //    | \   \   |     |
2292   //    |  \   \  |   StoreCM    . . .
2293   //    |   \   \ |
2294   //     \   \  Phi
2295   //      \   \ /
2296   //       \  Phi
2297   //        \ /
2298   //        Phi  . . .
2299   //     Bot |   /
2300   //       MergeMem
2301   //         |
2302   //         |
2303   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2304   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2305   //
2306   // 4)
2307   //   MemBarRelease/CPUOrder (leading)
2308   //    |
2309   //    |\
2310   //    | \
2311   //    |  \
2312   //    |   \
2313   //    |\   \
2314   //    | \   \
2315   //    |  \   \        . . .
2316   //    |   \   \         |
2317   //    |\   \   \   MemBarVolatile (card mark)
2318   //    | \   \   \   /   |
2319   //    |  \   \   \ /  StoreCM    . . .
2320   //    |   \   \  Phi
2321   //     \   \   \ /
2322   //      \   \  Phi
2323   //       \   \ /
2324   //        \  Phi
2325   //         \ /
2326   //         Phi  . . .
2327   //      Bot |   /
2328   //       MergeMem
2329   //          |
2330   //          |
2331   //    MemBarCPUOrder
2332   //    MemBarAcquire {trailing}
2333   //
2334   // configuration 1 is only valid if UseConcMarkSweepGC &&
2335   // UseCondCardMark
2336   //
2337   // configuration 2, is only valid if UseConcMarkSweepGC &&
2338   // UseCondCardMark or if UseG1GC
2339   //
2340   // configurations 3 and 4 are only valid if UseG1GC.
2341   //
2342   // if a valid configuration is present returns the trailing membar
2343   // otherwise NULL.
2344   //
2345   // n.b. the supplied membar is expected to be a card mark
2346   // MemBarVolatile i.e. the caller must ensure the input node has the
2347   // correct operand and feeds Mem to a StoreCM node
2348 
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
  {
    // input must be a card mark volatile membar
    assert(is_card_mark_membar(barrier), "expecting a card mark membar");

    Node *feed = barrier->proj_out(TypeFunc::Memory);
    Node *x;
    MergeMemNode *mm = NULL;

    const int MAX_PHIS = max_phis(); // max phis we will search through
    int phicount = 0;                // current search count

    // walk down from the card mark's memory projection, either landing
    // directly on a MergeMem or stepping through up to MAX_PHIS
    // intervening Bot-slice Phi nodes (see configurations 2-4 above)
    bool retry_feed = true;
    while (retry_feed) {
      // see if we have a direct MergeMem feed
      for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
        x = feed->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm) {
        retry_feed = false;
      } else if (phicount++ < MAX_PHIS) {
        // the barrier may feed indirectly via one or more Phi nodes
        PhiNode *phi = NULL;
        for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
          x = feed->fast_out(i);
          // the correct Phi will be merging a Bot memory slice
          if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
            phi = x->as_Phi();
            break;
          }
        }
        if (!phi) {
          return NULL;
        }
        // look for another merge below this phi
        feed = phi;
      } else {
        // couldn't find a merge
        return NULL;
      }
    }

    // sanity check this feed turns up as the expected slice
    guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");

    MemBarNode *trailing = NULL;
    // be sure we have a trailing membar fed by the merge
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        // if this is an intervening cpu order membar skip to the
        // following membar
        if (x->Opcode() == Op_MemBarCPUOrder) {
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL) {
            x = y;
          }
        }
        // accept either trailing form: volatile (store) or acquire (CAS)
        if (x->Opcode() == Op_MemBarVolatile ||
            x->Opcode() == Op_MemBarAcquire) {
          trailing = x->as_MemBar();
        }
        break;
      }
    }

    // NULL if the merge does not feed a suitable trailing membar
    return trailing;
  }
2423 
2424   // trailing_to_card_mark
2425   //
2426   // graph traversal helper which detects extra, non-normal Mem feed
2427   // from a trailing volatile membar to a preceding card mark volatile
2428   // membar i.e. it identifies whether one of the three possible extra
2429   // GC post-write Mem flow subgraphs is present
2430   //
2431   // this predicate checks for the same flow as the previous predicate
2432   // but starting from the bottom rather than the top.
2433   //
2434   // if the configuration is present returns the card mark membar
2435   // otherwise NULL
2436   //
2437   // n.b. the supplied membar is expected to be a trailing
2438   // MemBarVolatile or MemBarAcquire i.e. the caller must ensure the
2439   // input node has the correct opcode
2440 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile ||
           trailing->Opcode() == Op_MemBarAcquire,
           "expecting a volatile or acquire membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    Node *x = (Node *)trailing;

    // look for a preceding cpu order membar
    MemBarNode *y = parent_membar(x->as_MemBar());
    if (y != NULL) {
      // make sure it is a cpu order membar
      if (y->Opcode() != Op_MemBarCPUOrder) {
        // this is not the graph we were looking for
        return NULL;
      }
      // start the search from here
      x = y;
    }

    // the Mem feed to the membar should be a merge
    x = x->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = max_phis(); // max phis we will search through
    int phicount = 0;                    // current search count

    // if the Bot slice is already a Proj we can go straight to the
    // membar check; otherwise walk up through the Phi chain
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // scan the Phi's inputs for either a Proj from the card mark
        // membar or a further Bot Phi merging the leading membar's flow
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // not a Phi, or too many Phis for the current GC config
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2532 
2533   // trailing_to_leading
2534   //
2535   // graph traversal helper which checks the Mem flow up the graph
2536   // from a (non-card mark) trailing membar attempting to locate and
2537   // return an associated leading membar. it first looks for a
2538   // subgraph in the normal configuration (relying on helper
2539   // normal_to_leading). failing that it then looks for one of the
2540   // possible post-write card mark subgraphs linking the trailing node
2541   // to a the card mark membar (relying on helper
2542   // trailing_to_card_mark), and then checks that the card mark membar
2543   // is fed by a leading membar (once again relying on auxiliary
2544   // predicate normal_to_leading).
2545   //
2546   // if the configuration is valid returns the cpuorder member for
2547   // preference or when absent the release membar otherwise NULL.
2548   //
2549   // n.b. the input membar is expected to be either a volatile or
2550   // acquire membar but in the former case must *not* be a card mark
2551   // membar.
2552 
2553   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2554   {
2555     assert((trailing->Opcode() == Op_MemBarAcquire ||
2556             trailing->Opcode() == Op_MemBarVolatile),
2557            "expecting an acquire or volatile membar");
2558     assert((trailing->Opcode() != Op_MemBarVolatile ||
2559             !is_card_mark_membar(trailing)),
2560            "not expecting a card mark membar");
2561 
2562     MemBarNode *leading = normal_to_leading(trailing);
2563 
2564     if (leading) {
2565       return leading;
2566     }
2567 
2568     // there is no normal path from trailing to leading membar. see if
2569     // we can arrive via a card mark membar
2570 
2571     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2572 
2573     if (!card_mark_membar) {
2574       return NULL;
2575     }
2576 
2577     return normal_to_leading(card_mark_membar);
2578   }
2579 
2580   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2581 
2582 bool unnecessary_acquire(const Node *barrier)
2583 {
2584   assert(barrier->is_MemBar(), "expecting a membar");
2585 
2586   if (UseBarriersForVolatile) {
2587     // we need to plant a dmb
2588     return false;
2589   }
2590 
2591   // a volatile read derived from bytecode (or also from an inlined
2592   // SHA field read via LibraryCallKit::load_field_from_object)
2593   // manifests as a LoadX[mo_acquire] followed by an acquire membar
2594   // with a bogus read dependency on it's preceding load. so in those
2595   // cases we will find the load node at the PARMS offset of the
2596   // acquire membar.  n.b. there may be an intervening DecodeN node.
2597 
2598   Node *x = barrier->lookup(TypeFunc::Parms);
2599   if (x) {
2600     // we are starting from an acquire and it has a fake dependency
2601     //
2602     // need to check for
2603     //
2604     //   LoadX[mo_acquire]
2605     //   {  |1   }
2606     //   {DecodeN}
2607     //      |Parms
2608     //   MemBarAcquire*
2609     //
2610     // where * tags node we were passed
2611     // and |k means input k
2612     if (x->is_DecodeNarrowPtr()) {
2613       x = x->in(1);
2614     }
2615 
2616     return (x->is_Load() && x->as_Load()->is_acquire());
2617   }
2618 
2619   // other option for unnecessary membar is that it is a trailing node
2620   // belonging to a CAS
2621 
2622   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2623 
2624   return leading != NULL;
2625 }
2626 
// Decide whether a load should be emitted as ldar<x>: true only when
// it is an acquiring load that feeds an acquire membar (possibly via
// a DecodeN) and barriers are not being used for volatiles.
bool needs_acquiring_load(const Node *n)
{
  assert(n->is_Load(), "expecting a load");
  if (UseBarriersForVolatile) {
    // we use a normal load and a dmb
    return false;
  }

  LoadNode *ld = n->as_Load();

  if (!ld->is_acquire()) {
    return false;
  }

  // check if this load is feeding an acquire membar
  //
  //   LoadX[mo_acquire]
  //   {  |1   }
  //   {DecodeN}
  //      |Parms
  //   MemBarAcquire*
  //
  // where * tags node we were passed
  // and |k means input k

  Node *start = ld;
  Node *mbacq = NULL;

  // if we hit a DecodeNarrowPtr we reset the start node and restart
  // the search through the outputs
 restart:

  for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
    Node *x = start->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
      // found the acquire membar consuming the load
      mbacq = x;
    } else if (!mbacq &&
               (x->is_DecodeNarrowPtr() ||
                (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
      // intervening DecodeN: continue the search from its outputs
      start = x;
      goto restart;
    }
  }

  if (mbacq) {
    return true;
  }

  return false;
}
2677 
2678 bool unnecessary_release(const Node *n)
2679 {
2680   assert((n->is_MemBar() &&
2681           n->Opcode() == Op_MemBarRelease),
2682          "expecting a release membar");
2683 
2684   if (UseBarriersForVolatile) {
2685     // we need to plant a dmb
2686     return false;
2687   }
2688 
2689   // if there is a dependent CPUOrder barrier then use that as the
2690   // leading
2691 
2692   MemBarNode *barrier = n->as_MemBar();
2693   // check for an intervening cpuorder membar
2694   MemBarNode *b = child_membar(barrier);
2695   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2696     // ok, so start the check from the dependent cpuorder barrier
2697     barrier = b;
2698   }
2699 
2700   // must start with a normal feed
2701   MemBarNode *child_barrier = leading_to_normal(barrier);
2702 
2703   if (!child_barrier) {
2704     return false;
2705   }
2706 
2707   if (!is_card_mark_membar(child_barrier)) {
2708     // this is the trailing membar and we are done
2709     return true;
2710   }
2711 
2712   // must be sure this card mark feeds a trailing membar
2713   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2714   return (trailing != NULL);
2715 }
2716 
2717 bool unnecessary_volatile(const Node *n)
2718 {
2719   // assert n->is_MemBar();
2720   if (UseBarriersForVolatile) {
2721     // we need to plant a dmb
2722     return false;
2723   }
2724 
2725   MemBarNode *mbvol = n->as_MemBar();
2726 
2727   // first we check if this is part of a card mark. if so then we have
2728   // to generate a StoreLoad barrier
2729 
2730   if (is_card_mark_membar(mbvol)) {
2731       return false;
2732   }
2733 
2734   // ok, if it's not a card mark then we still need to check if it is
2735   // a trailing membar of a volatile put graph.
2736 
2737   return (trailing_to_leading(mbvol) != NULL);
2738 }
2739 
2740 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2741 
2742 bool needs_releasing_store(const Node *n)
2743 {
2744   // assert n->is_Store();
2745   if (UseBarriersForVolatile) {
2746     // we use a normal store and dmb combination
2747     return false;
2748   }
2749 
2750   StoreNode *st = n->as_Store();
2751 
2752   // the store must be marked as releasing
2753   if (!st->is_release()) {
2754     return false;
2755   }
2756 
2757   // the store must be fed by a membar
2758 
2759   Node *x = st->lookup(StoreNode::Memory);
2760 
2761   if (! x || !x->is_Proj()) {
2762     return false;
2763   }
2764 
2765   ProjNode *proj = x->as_Proj();
2766 
2767   x = proj->lookup(0);
2768 
2769   if (!x || !x->is_MemBar()) {
2770     return false;
2771   }
2772 
2773   MemBarNode *barrier = x->as_MemBar();
2774 
2775   // if the barrier is a release membar or a cpuorder mmebar fed by a
2776   // release membar then we need to check whether that forms part of a
2777   // volatile put graph.
2778 
2779   // reject invalid candidates
2780   if (!leading_membar(barrier)) {
2781     return false;
2782   }
2783 
2784   // does this lead a normal subgraph?
2785   MemBarNode *mbvol = leading_to_normal(barrier);
2786 
2787   if (!mbvol) {
2788     return false;
2789   }
2790 
2791   // all done unless this is a card mark
2792   if (!is_card_mark_membar(mbvol)) {
2793     return true;
2794   }
2795 
2796   // we found a card mark -- just make sure we have a trailing barrier
2797 
2798   return (card_mark_to_trailing(mbvol) != NULL);
2799 }
2800 
2801 // predicate controlling translation of CAS
2802 //
2803 // returns true if CAS needs to use an acquiring load otherwise false
2804 
// Returns true when a CAS must use an acquiring exclusive load.
// Whenever barriers are not used for volatiles the answer is always
// true; the ASSERT-only code below merely validates that the CAS is
// embedded in the expected membar subgraph.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  guarantee(barrier->Opcode() == Op_MemBarCPUOrder,
            "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  guarantee(mbar != NULL, "CAS not embedded in normal graph!");

  // if this is a card mark membar check we have a trailing acquire

  if (is_card_mark_membar(mbar)) {
    mbar = card_mark_to_trailing(mbar);
  }

  guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!");

  guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2857 
2858 // predicate controlling translation of StoreCM
2859 //
2860 // returns true if a StoreStore must precede the card write otherwise
2861 // false
2862 
2863 bool unnecessary_storestore(const Node *storecm)
2864 {
2865   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2866 
2867   // we need to generate a dmb ishst between an object put and the
2868   // associated card mark when we are using CMS without conditional
2869   // card marking
2870 
2871   if (UseConcMarkSweepGC && !UseCondCardMark) {
2872     return false;
2873   }
2874 
2875   // a storestore is unnecesary in all other cases
2876 
2877   return true;
2878 }
2879 
2880 
2881 #define __ _masm.
2882 
// forward declarations for helper functions used to convert register
// indices to register objects
2885 
2886 // the ad file has to provide implementations of certain methods
2887 // expected by the generic code
2888 //
2889 // REQUIRED FUNCTIONALITY
2890 
2891 //=============================================================================
2892 
2893 // !!!!! Special hack to get all types of calls to specify the byte offset
2894 //       from the start of the call to the point where the return address
2895 //       will point.
2896 
2897 int MachCallStaticJavaNode::ret_addr_offset()
2898 {
2899   // call should be a simple bl
2900   int off = 4;
2901   return off;
2902 }
2903 
2904 int MachCallDynamicJavaNode::ret_addr_offset()
2905 {
2906   return 16; // movz, movk, movk, bl
2907 }
2908 
2909 int MachCallRuntimeNode::ret_addr_offset() {
2910   // for generated stubs the call will be
2911   //   far_call(addr)
2912   // for real runtime callouts it will be six instructions
2913   // see aarch64_enc_java_to_runtime
2914   //   adr(rscratch2, retaddr)
2915   //   lea(rscratch1, RuntimeAddress(addr)
2916   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2917   //   blrt rscratch1
2918   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2919   if (cb) {
2920     return MacroAssembler::far_branch_size();
2921   } else {
2922     return 6 * NativeInstruction::instruction_size;
2923   }
2924 }
2925 
2926 // Indicate if the safepoint node needs the polling page as an input
2927 
2928 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2930 // instruction itself. so we cannot plant a mov of the safepoint poll
2931 // address followed by a load. setting this to true means the mov is
2932 // scheduled as a prior instruction. that's better for scheduling
2933 // anyway.
2934 
bool SafePointNode::needs_polling_address_input()
{
  // see the comment above: the mov of the poll address must be
  // scheduled ahead of the load so the oop map sits on the load
  return true;
}
2939 
2940 //=============================================================================
2941 
2942 #ifndef PRODUCT
// debug listing for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2946 #endif
2947 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // a brk instruction traps into the debugger
  __ brk(0);
}
2952 
// size is computed generically from the emitted code
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2956 
2957 //=============================================================================
2958 
2959 #ifndef PRODUCT
  // debug listing for a nop padding node
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
2963 #endif
2964 
2965   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2966     MacroAssembler _masm(&cbuf);
2967     for (int i = 0; i < _count; i++) {
2968       __ nop();
2969     }
2970   }
2971 
  // each nop is one machine instruction
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2975 
2976 //=============================================================================
// the constant base produces no value in a register so its output
// register mask is empty
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2978 
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
2982 
// no post-allocation expansion is needed for the constant base
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called because requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
2987 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
2991 
// empty encoding occupies no bytes
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
2995 
2996 #ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
3000 #endif
3001 
3002 #ifndef PRODUCT
3003 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3004   Compile* C = ra_->C;
3005 
3006   int framesize = C->frame_slots() << LogBytesPerInt;
3007 
3008   if (C->need_stack_bang(framesize))
3009     st->print("# stack bang size=%d\n\t", framesize);
3010 
3011   if (framesize < ((1 << 9) + 2 * wordSize)) {
3012     st->print("sub  sp, sp, #%d\n\t", framesize);
3013     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
3014     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
3015   } else {
3016     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
3017     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
3018     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3019     st->print("sub  sp, sp, rscratch1");
3020   }
3021 }
3022 #endif
3023 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification, constant table base setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // the simulator has to be told about method entry
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3059 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
3065 
// the prolog contains no relocatable values
int MachPrologNode::reloc() const
{
  return 0;
}
3070 
3071 //=============================================================================
3072 
3073 #ifndef PRODUCT
// debug listing for the method epilog
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // the frame pop mirrors the prolog's three frame-size regimes
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frames stage the sp adjustment through rscratch1
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    // return path touches the safepoint polling page
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
3097 #endif
3098 
// emit the method epilog: frame teardown, simulator notification,
// reserved stack check and return-path safepoint poll
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // the simulator has to be told about method exit
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // read the polling page so a safepoint can trigger on return
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3118 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
3123 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
3128 
// use the generic pipeline class for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
3132 
3133 // This method seems to be obsolete. It is declared in machnode.hpp
3134 // and defined in all *.ad files, but it is never called. Should we
3135 // get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3140 
3141 //=============================================================================
3142 
3143 // Figure out which register class each belongs in: rc_int, rc_float or
3144 // rc_stack.
3145 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3146 
3147 static enum RC rc_class(OptoReg::Name reg) {
3148 
3149   if (reg == OptoReg::Bad) {
3150     return rc_bad;
3151   }
3152 
3153   // we have 30 int registers * 2 halves
3154   // (rscratch1 and rscratch2 are omitted)
3155 
3156   if (reg < 60) {
3157     return rc_int;
3158   }
3159 
3160   // we have 32 float register * 2 halves
3161   if (reg < 60 + 128) {
3162     return rc_float;
3163   }
3164 
3165   // Between float regs & stack is the flags regs.
3166   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3167 
3168   return rc_stack;
3169 }
3170 
3171 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3172   Compile* C = ra_->C;
3173 
3174   // Get registers to move.
3175   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3176   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3177   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3178   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3179 
3180   enum RC src_hi_rc = rc_class(src_hi);
3181   enum RC src_lo_rc = rc_class(src_lo);
3182   enum RC dst_hi_rc = rc_class(dst_hi);
3183   enum RC dst_lo_rc = rc_class(dst_lo);
3184 
3185   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3186 
3187   if (src_hi != OptoReg::Bad) {
3188     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3189            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3190            "expected aligned-adjacent pairs");
3191   }
3192 
3193   if (src_lo == dst_lo && src_hi == dst_hi) {
3194     return 0;            // Self copy, no move.
3195   }
3196 
3197   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3198               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3199   int src_offset = ra_->reg2offset(src_lo);
3200   int dst_offset = ra_->reg2offset(dst_lo);
3201 
3202   if (bottom_type()->isa_vect() != NULL) {
3203     uint ireg = ideal_reg();
3204     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3205     if (cbuf) {
3206       MacroAssembler _masm(cbuf);
3207       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3208       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3209         // stack->stack
3210         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3211         if (ireg == Op_VecD) {
3212           __ unspill(rscratch1, true, src_offset);
3213           __ spill(rscratch1, true, dst_offset);
3214         } else {
3215           __ spill_copy128(src_offset, dst_offset);
3216         }
3217       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3218         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3219                ireg == Op_VecD ? __ T8B : __ T16B,
3220                as_FloatRegister(Matcher::_regEncode[src_lo]));
3221       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3222         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3223                        ireg == Op_VecD ? __ D : __ Q,
3224                        ra_->reg2offset(dst_lo));
3225       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3226         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3227                        ireg == Op_VecD ? __ D : __ Q,
3228                        ra_->reg2offset(src_lo));
3229       } else {
3230         ShouldNotReachHere();
3231       }
3232     }
3233   } else if (cbuf) {
3234     MacroAssembler _masm(cbuf);
3235     switch (src_lo_rc) {
3236     case rc_int:
3237       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3238         if (is64) {
3239             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3240                    as_Register(Matcher::_regEncode[src_lo]));
3241         } else {
3242             MacroAssembler _masm(cbuf);
3243             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3244                     as_Register(Matcher::_regEncode[src_lo]));
3245         }
3246       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3247         if (is64) {
3248             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3249                      as_Register(Matcher::_regEncode[src_lo]));
3250         } else {
3251             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3252                      as_Register(Matcher::_regEncode[src_lo]));
3253         }
3254       } else {                    // gpr --> stack spill
3255         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3256         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3257       }
3258       break;
3259     case rc_float:
3260       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3261         if (is64) {
3262             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3263                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3264         } else {
3265             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3266                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3267         }
3268       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3269           if (cbuf) {
3270             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3271                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3272         } else {
3273             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3274                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3275         }
3276       } else {                    // fpr --> stack spill
3277         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3278         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3279                  is64 ? __ D : __ S, dst_offset);
3280       }
3281       break;
3282     case rc_stack:
3283       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3284         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3285       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3286         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3287                    is64 ? __ D : __ S, src_offset);
3288       } else {                    // stack --> stack copy
3289         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3290         __ unspill(rscratch1, is64, src_offset);
3291         __ spill(rscratch1, is64, dst_offset);
3292       }
3293       break;
3294     default:
3295       assert(false, "bad rc_class for spill");
3296       ShouldNotReachHere();
3297     }
3298   }
3299 
3300   if (st) {
3301     st->print("spill ");
3302     if (src_lo_rc == rc_stack) {
3303       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3304     } else {
3305       st->print("%s -> ", Matcher::regName[src_lo]);
3306     }
3307     if (dst_lo_rc == rc_stack) {
3308       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3309     } else {
3310       st->print("%s", Matcher::regName[dst_lo]);
3311     }
3312     if (bottom_type()->isa_vect() != NULL) {
3313       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3314     } else {
3315       st->print("\t# spill size = %d", is64 ? 64:32);
3316     }
3317   }
3318 
3319   return 0;
3320 
3321 }
3322 
3323 #ifndef PRODUCT
// debug listing of a spill copy; shares implementation() with emit
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
3330 #endif
3331 
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
3335 
// size is computed generically from the emitted code
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3339 
3340 //=============================================================================
3341 
3342 #ifndef PRODUCT
3343 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3344   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3345   int reg = ra_->get_reg_first(this);
3346   st->print("add %s, rsp, #%d]\t# box lock",
3347             Matcher::regName[reg], offset);
3348 }
3349 #endif
3350 
// materialize the stack address of the box lock as a single add
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // offsets beyond the add immediate range are not expected here
    ShouldNotReachHere();
  }
}
3363 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;  // always a single add instruction
}
3368 
3369 //=============================================================================
3370 
3371 #ifndef PRODUCT
// debug listing of the unverified entry point inline cache check
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
3386 #endif
3387 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // compare the receiver klass against the cached klass and branch
  // to the miss stub on mismatch
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3401 
// size is computed generically from the emitted code
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3406 
3407 // REQUIRED EMIT CODE
3408 
3409 //=============================================================================
3410 
3411 // Emit exception handler code.
// Emit exception handler code: a far jump to the exception blob.
// Returns the handler's offset within the stub section, or 0 when the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3430 
3431 // Emit deopt handler code.
// Emit deopt handler code: capture the return address in lr then far
// jump to the deopt blob's unpack entry. Returns the handler's offset
// within the stub section, or 0 when the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // the unpack blob expects the return address in lr
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3451 
3452 // REQUIRED MATCHER CODE
3453 
3454 //=============================================================================
3455 
3456 const bool Matcher::match_rule_supported(int opcode) {
3457 
3458   switch (opcode) {
3459   default:
3460     break;
3461   }
3462 
3463   if (!has_match_rule(opcode)) {
3464     return false;
3465   }
3466 
3467   return true;  // Per default match rules are supported.
3468 }
3469 
3470 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3471 
3472   // TODO
3473   // identify extra cases that we might want to provide match rules for
3474   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3475   bool ret_value = match_rule_supported(opcode);
3476   // Add rules here.
3477 
3478   return ret_value;  // Per default match rules are supported.
3479 }
3480 
// No predicated (masked) vector support on this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
3484 
// Use the register allocator's default float register pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
3488 
// x87-style FPU stack offsets do not exist on AArch64; this must never
// be called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3494 
3495 // Is this branch offset short enough that a short branch can be used?
3496 //
3497 // NOTE: If the platform does not provide any short branch variants, then
3498 //       this method should return false for offset 0.
3499 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3500   // The passed offset is relative to address of the branch.
3501 
3502   return (-32768 <= offset && offset < 32768);
3503 }
3504 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
3510 
3511 // true just means we have fast l2f conversion
3512 const bool Matcher::convL2FSupported(void) {
3513   return true;
3514 }
3515 
3516 // Vector width in bytes.
3517 const int Matcher::vector_width_in_bytes(BasicType bt) {
3518   int size = MIN2(16,(int)MaxVectorSize);
3519   // Minimum 2 values in vector
3520   if (size < 2*type2aelembytes(bt)) size = 0;
3521   // But never < 4
3522   if (size < 4) size = 0;
3523   return size;
3524 }
3525 
3526 // Limits on vector size (number of elements) loaded into vector.
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count is the full vector width divided by element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3530 const int Matcher::min_vector_size(const BasicType bt) {
3531 //  For the moment limit the vector size to 8 bytes
3532     int size = 8 / type2aelembytes(bt);
3533     if (size < 2) size = 2;
3534     return size;
3535 }
3536 
3537 // Vector ideal reg.
3538 const uint Matcher::vector_ideal_reg(int len) {
3539   switch(len) {
3540     case  8: return Op_VecD;
3541     case 16: return Op_VecX;
3542   }
3543   ShouldNotReachHere();
3544   return 0;
3545 }
3546 
// Vector shift counts are always held in a full 128-bit register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3550 
3551 // AES support not yet implemented
// AES intrinsics do not need the original (unexpanded) key passed along.
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3555 
// AArch64 supports misaligned vector store/load (comment previously
// said "x86"; the answer is the same here).
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3560 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low bits of the count register.
const bool Matcher::need_masked_shift_count = false;
3581 
3582 // This affects two different things:
3583 //  - how Decode nodes are matched
3584 //  - how ImplicitNullCheck opportunities are recognized
3585 // If true, the matcher will try to remove all Decodes and match them
3586 // (as operands) into nodes. NullChecks are not prepared to deal with
3587 // Decodes by final_graph_reshaping().
3588 // If false, final_graph_reshaping() forces the decode behind the Cmp
3589 // for a NullCheck. The matcher matches the Decode node into a register.
3590 // Implicit_null_check optimization moves the Decode along with the
3591 // memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing is only profitable when decoding is a pure
  // zero-extension, i.e. when the compressed-oop shift is zero.
  return Universe::narrow_oop_shift() == 0;
}
3595 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3601 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  // (Zero heap base means decoding is a cheap shift/zero-extend.)
  return Universe::narrow_oop_base() == NULL;
}
3606 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // (Zero klass base means decoding is a cheap shift/zero-extend.)
  return Universe::narrow_klass_base() == NULL;
}
3611 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Implicit-null-check fixup is not used on AArch64; reaching here is a
// bug.  (The old "No-op on amd64" comment was stale -- this aborts.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3643 
3644 // Return whether or not this register is ever used as an argument.
3645 // This function is used on startup to build the trampoline stubs in
3646 // generateOptoStub.  Registers not mentioned will be killed by the VM
3647 // call in the trampoline, and arguments in those registers not be
3648 // available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments are passed in r0-r7 (integral) and v0-v7 (floating),
  // each with its _H (high-half) companion regmask slot.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
3669 
// Any register usable as a Java argument may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3674 
// No special assembler sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3678 
// Register for DIVI projection of divmodI.
// AArch64 has no fused divmod node, so none of these projections exist.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across a method handle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3705 
3706 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3707   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3708     Node* u = addp->fast_out(i);
3709     if (u->is_Mem()) {
3710       int opsize = u->as_Mem()->memory_size();
3711       assert(opsize > 0, "unexpected memory operand size");
3712       if (u->as_Mem()->memory_size() != (1<<shift)) {
3713         return false;
3714       }
3715     }
3716   }
3717   return true;
3718 }
3719 
// ConvI2L nodes do not need their type information preserved on AArch64.
const bool Matcher::convi2l_type_required = false;
3721 
3722 // Should the Matcher clone shifts on addressing modes, expecting them
3723 // to be subsumed into complex addressing expressions or compute them
3724 // into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // First try the simple (base + constant offset) form.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // Otherwise look for (base + (offset << con)) or (base + ConvI2L(off)),
  // which can be subsumed into an AArch64 scaled / sign-extended
  // addressing mode.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // A ConvI2L feeding the shift can be folded into a sign-extending
    // (sxtw) addressing mode, so clone through it as well.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3762 
// Intentionally empty on AArch64: address expressions are handled at
// match time (see clone_address_expressions above).
void Compile::reshape_address(AddPNode* addp) {
}
3765 
3766 // helper for encoding java_to_runtime calls on sim
3767 //
3768 // this is needed to compute the extra arguments required when
3769 // planting a call to the simulator blrt instruction. the TypeFunc
3770 // can be queried to identify the counts for integral, and floating
3771 // arguments and the return type
3772 
// Count integral (gpcnt) and floating-point (fpcnt) parameters of tf and
// classify the return type (rtype) for the simulator's blrt instruction.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so FP parameters also fall through
      // and increment gps -- i.e. gps ends up counting *all* parameters.
      // This looks deliberate for the simulator's blrt convention, but
      // confirm before relying on gpcnt being GP-only.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return value; the default arm covers all integral types.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3807 
// Emit a volatile (acquire/release) load or store.  Volatile accesses
// permit only plain base-register addressing, hence the guarantees.
// Deliberately NOT wrapped in do { } while (0): the macro declares
// _masm at the enclosing scope so that subsequent __ statements in the
// same enc_class can use it.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3816 
// Pointer-to-member signatures for the MacroAssembler load/store
// routines dispatched by the loadStore() helpers below: integer,
// float and vector variants.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3821 
3822   // Used for all non-volatile memory accesses.  The use of
3823   // $mem->opcode() to discover whether this pattern uses sign-extended
3824   // offsets is something of a kludge.
3825   static void loadStore(MacroAssembler masm, mem_insn insn,
3826                          Register reg, int opcode,
3827                          Register base, int index, int size, int disp)
3828   {
3829     Address::extend scale;
3830 
3831     // Hooboy, this is fugly.  We need a way to communicate to the
3832     // encoder that the index needs to be sign extended, so we have to
3833     // enumerate all the cases.
3834     switch (opcode) {
3835     case INDINDEXSCALEDI2L:
3836     case INDINDEXSCALEDI2LN:
3837     case INDINDEXI2L:
3838     case INDINDEXI2LN:
3839       scale = Address::sxtw(size);
3840       break;
3841     default:
3842       scale = Address::lsl(size);
3843     }
3844 
3845     if (index == -1) {
3846       (masm.*insn)(reg, Address(base, disp));
3847     } else {
3848       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3849       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3850     }
3851   }
3852 
3853   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3854                          FloatRegister reg, int opcode,
3855                          Register base, int index, int size, int disp)
3856   {
3857     Address::extend scale;
3858 
3859     switch (opcode) {
3860     case INDINDEXSCALEDI2L:
3861     case INDINDEXSCALEDI2LN:
3862       scale = Address::sxtw(size);
3863       break;
3864     default:
3865       scale = Address::lsl(size);
3866     }
3867 
3868      if (index == -1) {
3869       (masm.*insn)(reg, Address(base, disp));
3870     } else {
3871       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3872       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3873     }
3874   }
3875 
3876   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3877                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3878                          int opcode, Register base, int index, int size, int disp)
3879   {
3880     if (index == -1) {
3881       (masm.*insn)(reg, T, Address(base, disp));
3882     } else {
3883       assert(disp == 0, "unsupported address mode");
3884       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3885     }
3886   }
3887 
3888 %}
3889 
3890 
3891 
3892 //----------ENCODING BLOCK-----------------------------------------------------
3893 // This block specifies the encoding classes used by the compiler to
3894 // output byte streams.  Encoding classes are parameterized macros
3895 // used by Machine Instruction Nodes in order to generate the bit
3896 // encoding of the instruction.  Operands specify their base encoding
3897 // interface with the interface keyword.  There are currently
3898 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3899 // COND_INTER.  REG_INTER causes an operand to generate a function
3900 // which returns its register number when queried.  CONST_INTER causes
3901 // an operand to generate a function which returns the value of the
3902 // constant when queried.  MEMORY_INTER causes an operand to generate
3903 // four functions which return the Base Register, the Index Register,
3904 // the Scale Value, and the Offset Value of the operand when queried.
3905 // COND_INTER causes an operand to generate six functions which return
3906 // the encoding code (ie - encoding bits for the instruction)
3907 // associated with each basic boolean condition for a conditional
3908 // instruction.
3909 //
3910 // Instructions specify two basic values for encoding.  Again, a
3911 // function is available to check if the constant displacement is an
3912 // oop. They use the ins_encode keyword to specify their encoding
3913 // classes (which must be a sequence of enc_class names, and their
3914 // parameters, specified in the encoding block), and they use the
3915 // opcode keyword to specify, in order, their primary, secondary, and
3916 // tertiary opcode.  Only the opcode sections which a particular
3917 // instruction needs for encoding need to be specified.
3918 encode %{
3919   // Build emit functions for each basic byte or larger field in the
3920   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3921   // from C++ code in the enc_class source block.  Emit functions will
3922   // live in the main source block for now.  In future, we can
3923   // generalize this by adding a syntax that specifies the sizes of
3924   // fields in an order, so that the adlc can build the emit functions
3925   // automagically
3926 
  // catch all for unimplemented encodings -- emits an "unimplemented"
  // marker so a missing encoding is caught at run time rather than
  // silently producing no code.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3932 
  // BEGIN Non-volatile memory access

  // Each load encoding below forwards to the loadStore() helper, which
  // selects base+disp or base+index addressing from the operand fields.
  // Variants differ only in the load instruction (width / sign) and the
  // destination register class.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP loads (single / double precision).

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: S = 32-bit, D = 64-bit, Q = 128-bit register variant.

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4036 
  // Store encodings; the *0 variants store the zero register (zr)
  // directly, avoiding the need to materialize a zero constant.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    // StoreStore barrier before the byte store orders it after prior stores.
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not encodable as the source of str), so copy it to
    // rscratch2 first.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP stores (single / double precision).

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S = 32-bit, D = 64-bit, Q = 128-bit register variant.

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
4131 
4132   // volatile loads and stores
4133 
4134   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4135     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4136                  rscratch1, stlrb);
4137   %}
4138 
4139   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4140     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4141                  rscratch1, stlrh);
4142   %}
4143 
4144   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4145     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4146                  rscratch1, stlrw);
4147   %}
4148 
4149 
4150   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
4151     Register dst_reg = as_Register($dst$$reg);
4152     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4153              rscratch1, ldarb);
4154     __ sxtbw(dst_reg, dst_reg);
4155   %}
4156 
4157   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
4158     Register dst_reg = as_Register($dst$$reg);
4159     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4160              rscratch1, ldarb);
4161     __ sxtb(dst_reg, dst_reg);
4162   %}
4163 
  // Volatile (load-acquire) load encodings.  Each routes through the
  // MOV_VOLATILE helper macro (defined earlier in this file), passing the
  // acquire-load instruction to emit; rscratch1 is available for any
  // address arithmetic the helper needs.

  // Load-acquire byte into a 32-bit dst (ldarb zero-extends).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte into a 64-bit dst (upper bits zeroed by ldarb).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword into a 32-bit dst (zero-extended).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword into a 64-bit dst (zero-extended).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire 32-bit word.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 32-bit word into a 64-bit dst (upper bits zeroed).
  // NOTE(review): same enc_class name as the iRegI variant above,
  // differing only in operand type -- presumably resolved by ADLC on
  // operand types; confirm against the instruct definitions that use it.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Volatile float load: acquire-load the 32-bit pattern into rscratch1,
  // then move the bits into the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: acquire-load the 64-bit pattern into rscratch1,
  // then move the bits into the FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4224 
  // Volatile (store-release) store encodings.

  // Store-release of a 64-bit register.  stlr cannot encode sp directly,
  // so if asked to store the stack pointer (only expected for stores into
  // the current thread) copy it to rscratch2 first.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile float store: move the 32-bit pattern into rscratch2, then
  // store-release it as a word.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: move the 64-bit pattern into rscratch2, then
  // store-release it as a doubleword.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4258 
4259   // synchronized read/update encodings
4260 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so any index/displacement addressing is first folded
  // into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // index == -1 means no index register in the memory operand.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale): needs two address steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4289 
  // Store-release-exclusive of a 64-bit value.  The exclusive-store
  // status lands in rscratch1 (0 on success); the trailing cmpw leaves
  // EQ set iff the store succeeded.  Address formation mirrors
  // aarch64_enc_ldaxr, using rscratch2 as the address scratch.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // index == -1 means no index register in the memory operand.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + (index << scale): needs two address steps.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Publish the success/failure status in the condition flags.
    __ cmpw(rscratch1, zr);
  %}
4319 
  // Compare-and-swap encodings (release semantics, no acquire).  All
  // variants require a bare-base memory operand (no index, no
  // displacement) and delegate to MacroAssembler::cmpxchg, differing
  // only in operand size.

  // 64-bit CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (short) CAS.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4351 
4352 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit CAS with acquire and release semantics.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS with acquire and release semantics.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4372 
4373 
4374   // auxiliary used for CompareAndSwapX to set result register
4375   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4376     MacroAssembler _masm(&cbuf);
4377     Register res_reg = as_Register($res$$reg);
4378     __ cset(res_reg, Assembler::EQ);
4379   %}
4380 
4381   // prefetch encodings
4382 
  // Prefetch for write: emits prfm with PSTL1KEEP (prefetch for store,
  // L1, retain).  prfm accepts base+disp or base+index addressing
  // directly; only the combined base+disp+index case needs lea into
  // rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // index == -1 means no index register in the memory operand.
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4401 
  /// mov encodings
4403 
  // 32-bit immediate move; zero is materialized from zr rather than an
  // immediate form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; zero is materialized from zr rather than an
  // immediate form.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
4425 
  // Pointer-constant move.  NULL and the (address)1 sentinel are handled
  // by separate encodings (mov_p0 / mov_p1 below), hence the
  // ShouldNotReachHere.  Oop and metadata constants go through the
  // relocation-aware movers; plain addresses below the first page are
  // emitted as immediates, others via adrp+add (PC-relative).
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4450 
  // Pointer constant 0 (NULL): just copy zr.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (sentinel value, see immP_1 operand).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Safepoint polling page address, emitted with a poll_type relocation.
  // The page is page-aligned, so adrp alone must suffice (offset == 0).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Card-table byte map base (GC barrier support).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow oop constant 0: just copy zr.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant; must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4508 
4509   // arithmetic encodings
4510 
  // Shared add/subtract-immediate encoding (32-bit).  $primary selects
  // the operation (0 = add, 1 = subtract); subtract is folded into an
  // add of the negated constant, then the sign picks the addw/subw form
  // so the assembler always sees a non-negative immediate.
  // NOTE(review): negating con assumes the immIAddSub operand range
  // excludes INT_MIN -- confirm against the operand definition.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Shared add/subtract-immediate encoding (64-bit).  Same scheme as the
  // 32-bit variant above; the constant is deliberately narrowed to
  // int32_t (immLAddSub presumably restricts it to that range -- verify).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4538 
  // Integer divide/modulo encodings.  corrected_idivl/corrected_idivq
  // (MacroAssembler) take a want_remainder flag: false = quotient,
  // true = remainder; rscratch1 is scratch for the correction.

  // 32-bit divide.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide.
  // NOTE(review): operands are declared iRegI but corrected_idivq is the
  // 64-bit form; presumably ADLC ignores the declared operand class here
  // -- confirm against the instructs that use this encoding.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4570 
4571   // compare instruction encodings
4572 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate: encoded as a
  // flag-setting subtract (or add of the negation) discarding the result.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit-range immediate.  val != -val is
  // only false for 0 (already covered) and Long.MIN_VALUE, which cannot
  // be negated and so is materialized via orr instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow (compressed) oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4654 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Conditional branch, unsigned-comparison condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
4672 
  // Slow-path subtype check (secondary supers scan) via
  // MacroAssembler::check_klass_subtype_slow_path.  On a hit control
  // falls through to the label-miss bind point with condition codes set
  // (set_cond_codes == true); $primary additionally requests zeroing the
  // result register on success before the miss label.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4690 
  // Static (or opt-virtual) Java call.  Runtime-wrapper targets
  // (_method == NULL) get a plain runtime-call relocation; real Java
  // targets get a static/opt-virtual call relocation plus a
  // call-to-interpreter stub.  Either path bails out (recording
  // CodeCache-full) if emission fails.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4717 
  // Dynamic (inline-cache) Java call; fails compilation if the call
  // site cannot be emitted (code cache full).
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4727 
  // Post-call epilogue.  The VerifyStackAtCalls check is not implemented
  // on AArch64; it deliberately emits call_Unimplemented so the flag
  // fails loudly rather than silently doing nothing.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4735 
  // Call from compiled Java code to the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Native runtime target: call indirectly via blrt, after pushing
      // the return PC so the stack walker can find the last Java pc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4766 
4767   enc_class aarch64_enc_rethrow() %{
4768     MacroAssembler _masm(&cbuf);
4769     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
4770   %}
4771 
4772   enc_class aarch64_enc_ret() %{
4773     MacroAssembler _masm(&cbuf);
4774     __ ret(lr);
4775   %}
4776 
4777   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4778     MacroAssembler _masm(&cbuf);
4779     Register target_reg = as_Register($jump_target$$reg);
4780     __ br(target_reg);
4781   %}
4782 
4783   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4784     MacroAssembler _masm(&cbuf);
4785     Register target_reg = as_Register($jump_target$$reg);
4786     // exception oop should be in r0
4787     // ret addr has been popped into lr
4788     // callee expects it in r3
4789     __ mov(r3, lr);
4790     __ br(target_reg);
4791   %}
4792 
  // Fast-path monitor enter.  Leaves the condition flags as the result:
  // EQ = lock acquired, NE = must take the slow path (see comments at
  // the end).  EmitSync bits force slow-path (0x01) or disable the
  // inflated-monitor fast path (0x02).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this comparison always sets NE.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single compare-and-swap-with-acquire/release.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC path: ldaxr/stlxr retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // On success (tmp == 0) flags are still EQ from the cmp above.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4947 
4948   // TODO
4949   // reimplement this with custom cmpxchgptr code
4950   // which avoids some of the unnecessary branching
  // Fast-path monitor exit; mirror of aarch64_enc_fast_lock.  Leaves the
  // condition flags as the result: EQ = unlocked, NE = slow path needed.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      // NOTE(review): the monitor bit is tested on disp_hdr (the saved
      // displaced header) while tmp holds the freshly loaded mark word;
      // later JDK versions test the mark word here -- verify this is
      // intentional (an inflated lock would then reach the CAS below,
      // fail it, and still end up on the slow path).
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE path: CAS the mark word from box back to the displaced
        // header (release semantics only).
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // LL/SC path: ldxr/stlxr retry loop.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // On success (tmp == 0) flags are still EQ from the cmp above.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets NE (slow path) when waiters exist; cbnz skips the
      // owner-field clear in that case, leaving the flags for cont.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5046 
5047 %}
5048 
5049 //----------FRAME--------------------------------------------------------------
5050 // Definition of frame structure and management information.
5051 //
5052 //  S T A C K   L A Y O U T    Allocators stack-slot number
5053 //                             |   (to get allocators register number
5054 //  G  Owned by    |        |  v    add OptoReg::stack0())
5055 //  r   CALLER     |        |
5056 //  o     |        +--------+      pad to even-align allocators stack-slot
5057 //  w     V        |  pad0  |        numbers; owned by CALLER
5058 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5059 //  h     ^        |   in   |  5
5060 //        |        |  args  |  4   Holes in incoming args owned by SELF
5061 //  |     |        |        |  3
5062 //  |     |        +--------+
5063 //  V     |        | old out|      Empty on Intel, window on Sparc
5064 //        |    old |preserve|      Must be even aligned.
5065 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5066 //        |        |   in   |  3   area for Intel ret address
5067 //     Owned by    |preserve|      Empty on Sparc.
5068 //       SELF      +--------+
5069 //        |        |  pad2  |  2   pad to align old SP
5070 //        |        +--------+  1
5071 //        |        | locks  |  0
5072 //        |        +--------+----> OptoReg::stack0(), even aligned
5073 //        |        |  pad1  | 11   pad to align new SP
5074 //        |        +--------+
5075 //        |        |        | 10
5076 //        |        | spills |  9   spills
5077 //        V        |        |  8   (pad0 slot for callee)
5078 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5079 //        ^        |  out   |  7
5080 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5081 //     Owned by    +--------+
5082 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5083 //        |    new |preserve|      Must be even-aligned.
5084 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5085 //        |        |        |
5086 //
5087 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5088 //         known from SELF's arguments and the Java calling convention.
5089 //         Region 6-7 is determined per call site.
5090 // Note 2: If the calling convention leaves holes in the incoming argument
5091 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5096 //         varargs C calling conventions.
5097 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5098 //         even aligned with pad0 as needed.
5099 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5100 //           (the latter is true on Intel but is it false on AArch64?)
5101 //         region 6-11 is even aligned; it may be padded out more so that
5102 //         the region from SP to FP meets the minimum stack alignment.
5103 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5104 //         alignment.  Region 11, pad1, may be dynamically extended so that
5105 //         SP meets the minimum alignment.
5106 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 here names the SP slot in this AD file's register
  // definitions (not visible in this chunk) -- confirm against the
  // register block.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the return register pair; OptoReg::Bad for 32-bit values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5210 
5211 //----------ATTRIBUTES---------------------------------------------------------
5212 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute; operands default to cost 1
                             // unless they declare op_cost explicitly

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5228 
5229 //----------OPERANDS-----------------------------------------------------------
5230 // Operand definitions must precede instruction definitions for correct parsing
5231 // in the ADLC because operands constitute user defined types which are used in
5232 // instruction definitions.
5233 
5234 //----------Simple Operands----------------------------------------------------
5235 
// Integer operands 32 bit
// 32 bit immediate (matches any 32 bit constant)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5279 
// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4.  Note: unlike immIExt there is no
// lower bound, so negative constants also match.
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5300 
// Specific 32 bit constant values required by individual match rules
// (shift counts, lane widths, byte/halfword masks, etc.)
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5410 
// 64 bit constant 255 (low byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits: value+1 is a power of 2
// and the top two bits are clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits: value+1 is a power of 2
// and the top two bits are clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5462 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5516 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte (size shift 2) scaled access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte (size shift 3) scaled access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte (size shift 4) scaled access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5619 
// Integer operands 64 bit
// 64 bit immediate (matches any 64 bit constant)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Byte offset of the saved pc (last_Java_pc) within the thread's
// frame anchor -- used when writing the current pc into the anchor.

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5706 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 -- presumably -2 marks a
// distinct anchor state; confirm against the uses of this operand
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5788 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value representable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value representable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5849 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5880 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5902 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5914 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // Declare zero cost explicitly: op_attrib op_cost(1) makes the default
  // cost 1, and every sibling register operand (iRegINoSp, iRegL,
  // iRegPNoSp) declares op_cost(0); the omission here was inconsistent.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5924 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5957 
// Fixed-register pointer operands: pin a pointer value to one specific
// register (used where the calling convention or a stub requires it).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6041 
// Fixed-register long operands -- same idea as the fixed pointer
// operands above but for 64 bit integer values.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6096 
// Fixed-register 32 bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6141 
6142 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6202 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register double operands (V0..V3), used where a stub or the
// calling convention requires a specific FP register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6282 
6283 // Flags register, used as output of signed compare instructions
6284 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
6287 // that ordered inequality tests use GT, GE, LT or LE none of which
6288 // pass through cases where the result is unordered i.e. one or both
6289 // inputs to the compare is a NaN. this means that the ideal code can
6290 // replace e.g. a GT with an LE and not end up capturing the NaN case
6291 // (where the comparison should always fail). EQ and NE tests are
6292 // always generated in ideal code so that unordered folds into the NE
6293 // case, matching the behaviour of AArch64 NE.
6294 //
6295 // This differs from x86 where the outputs of FP compares use a
6296 // special FP flags registers and where compares based on this
6297 // register are distinguished into ordered inequalities (cmpOpUCF) and
6298 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6299 // to explicitly handle the unordered case in branches. x86 also has
6300 // to include extra CMoveX rules to accept a cmpOpUCF input.
6301 
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same int_flags register class as rFlagsReg; the distinct operand
// marks rules whose conditions must use the unsigned condition codes).
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6322 
6323 // Special Registers
6324 
6325 // Method Register
// Pointer operand pinned to the inline-cache register (method_reg class).
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer operand pinned to the interpreter method-oop register
// (same method_reg class as above).
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6345 
6346 // Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously
                                       // said link_reg — copy/paste slip)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link register (lr) operand.
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6364 
6365 //----------Memory Operands----------------------------------------------------
6366 
// Plain register-indirect addressing: [reg].
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (sign-extended int index << scale). The predicate checks the
// scale is usable by every memory consumer of this address expression.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), same scale-fits predicate as above.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + sign-extended int index (no scaling).
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + long index (no scaling).
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6438 
// base + immediate-offset addressing modes. The variants below differ
// only in the immediate operand type (immIOffset4/8/16, immLoffset...),
// which restricts the offset to values legal for the corresponding
// access size — see those operand definitions earlier in the file.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, offset constrained by immIOffset4.
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, offset constrained by immIOffset8.
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, offset constrained by immIOffset16.
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Long-immediate offset form.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, offset constrained by immLoffset4.
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, offset constrained by immLoffset8.
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, offset constrained by immLoffset16.
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6550 
// Narrow-oop (compressed pointer) forms of the addressing modes above.
// Each matches a (DecodeN reg) base and is only enabled when
// Universe::narrow_oop_shift() == 0 — see the predicates.
// NOTE(review): the predicates check only the shift, not the narrow-oop
// base; presumably the base case is handled elsewhere — confirm.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + (sign-extended int index << scale).
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + (long index << scale).
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + sign-extended int index.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + long index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + long immediate offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6655 
6656 
6657 
6658 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// Address of the pc slot in the thread's JavaFrameAnchor:
// [thread_reg, #immL_pc_off].
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6672 
6673 //----------Special Memory Operands--------------------------------------------
6674 // Stack Slot Operand - This operand is used for loading and storing temporary
6675 //                      values on the stack where a match requires a value to
6676 //                      flow through memory.
// Pointer-sized stack slot: SP-relative access, displacement is the
// allocated stack offset.
// NOTE(review): only stackSlotP declares op_cost(100); the I/F/D/L
// variants below rely on the default — confirm this is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int-sized stack slot.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float-sized stack slot.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double-sized stack slot.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long-sized stack slot.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6747 
6748 // Operands for expressing Control Flow
6749 // NOTE: Label is a predefined operand which should not be redefined in
6750 //       the AD file. It is generically handled within the ADLC.
6751 
6752 //----------Conditional Branch Operands----------------------------------------
6753 // Comparison Op  - This is the operation of the comparison, and is limited to
6754 //                  the following set of codes:
6755 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6756 //
6757 // Other attributes of the comparison, such as unsignedness, are specified
6758 // by the comparison instruction that sets a condition code flags register.
6759 // That result is represented by a flags operand whose subtype is appropriate
6760 // to the unsignedness (etc.) of the comparison.
6761 //
6762 // Later, the instruction which matches both the Comparison Op (a Bool) and
6763 // the flags (produced by the Cmp) specifies the coding of the comparison op
6764 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6765 
6766 // used for signed integral comparisons and fp comparisons
6767 
// The hex values below are the AArch64 condition-code encodings,
// paired with their assembler mnemonics.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (same structure as cmpOp but with the unsigned condition codes
// lo/hs/ls/hi for the ordering tests).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6803 
6804 // used for certain integral comparisons which can be
6805 // converted to cbxx or tbxx instructions
6806 
// Restricted cmpOp: predicate admits only eq/ne tests.
operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted cmpOp: predicate admits only lt/ge tests.
operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted cmpOp: predicate admits eq/ne/lt/ge tests.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6879 
6880 // Special operand allowing long args to int ops to be truncated for free
6881 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
  // NOTE(review): no trailing ';' after interface(REG_INTER) here,
  // unlike the operands above; ADLC accepts both forms, left as-is.
%}
6892 
// Operand classes naming the addressing modes legal for vector
// load/store accesses of 4, 8 and 16 bytes respectively.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6896 
6897 //----------OPERAND CLASSES----------------------------------------------------
6898 // Operand Classes are groups of operands that are used as to simplify
6899 // instruction definitions by not requiring the AD writer to specify
6900 // separate instructions for every form of operand when the
6901 // instruction accepts multiple operand types with the same basic
6902 // encoding and format. The classic case of this is memory operands.
6903 
6904 // memory is used to define read/write location for load/store
6905 // instruction defs. we can turn a memory op into an Address
6906 
// All supported addressing modes, in plain and narrow-oop (N) forms.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
6909 
6910 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6911 // operations. it allows the src to be either an iRegI or a (ConvL2I
6912 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6913 // can be elided because the 32-bit instruction will just employ the
6914 // lower 32 bits anyway.
6915 //
6916 // n.b. this does not elide all L2I conversions. if the truncated
6917 // value is consumed by more than one operation then the ConvL2I
6918 // cannot be bundled into the consuming nodes so an l2i gets planted
6919 // (actually a movw $dst $src) and the downstream instructions consume
6920 // the result of the l2i as an iRegI input. That's a shame since the
6921 // movw is actually redundant but its not too costly.
6922 
// Either a plain 32-bit int register or a free truncation of a long.
opclass iRegIorL2I(iRegI, iRegL2I);
6924 
6925 //----------PIPELINE-----------------------------------------------------------
6926 // Rules which define the behavior of the target architectures pipeline.
6927 
6928 // For specific pipelines, eg A53, define the stages of that pipeline
6929 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names onto the generic six-stage
// pipe_desc(S0..S5) declared below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6934 
6935 // Integer ALU reg operation
6936 pipeline %{
6937 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;           // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6950 
6951 // We don't use an actual pipeline model so don't care about resources
6952 // or description. we do use pipeline classes to introduce fixed
6953 // latencies
6954 
6955 //----------RESOURCES----------------------------------------------------------
6956 // Resources are the functional units available to the machine
6957 
// Functional units for a dual-issue in-order (A53-like) core.
// INS01 and ALU are unions: a class requiring INS01 may issue on
// either INS0 or INS1 (likewise ALU on ALU0/ALU1), whereas a class
// requiring INS0 alone is restricted to issue slot 0.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6965 
6966 //----------PIPELINE DESCRIPTION-----------------------------------------------
6967 // Pipeline Description specifies the stages in the machine's pipeline
6968 
6969 // Define the pipeline as a generic 6 stage pipeline
6970 pipe_desc(S0, S1, S2, S3, S4, S5);
6971 
6972 //----------PIPELINE CLASSES---------------------------------------------------
6973 // Pipeline Classes describe the stages in which input and output are
6974 // referenced by the hardware pipeline.
6975 
// FP two-source reg-reg op, single precision:
// sources read in S1/S2, result written in S5 on the NEON/FP unit.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// As above, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7031 
// FP convert float -> int (result in a GP register).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double.
// NOTE(review): src is iRegIorL2I here while fp_l2f uses iRegL —
// confirm the asymmetry is intentional (scheduling-only effect).
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7103 
// FP divide, single precision — restricted to issue slot 0 (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision — restricted to issue slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: flags and both sources
// read in S1, result in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision — result in S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
7177 
// Vector multiply, 64-bit.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit — restricted to issue slot 0.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit; dst is also read in S1
// (the accumulate input).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit — slot 0 only.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer two-source ALU op, 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer two-source ALU op, 128-bit — slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit — slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7259 
// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit — slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit — slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP two-source op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP two-source op, 128-bit — slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit — slot 0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit — slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit — slot 0 only.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7346 
// Vector FP one-source op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP one-source op, 128-bit — slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate GP register into all lanes, 64-bit.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate GP register into all lanes, 128-bit.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into all lanes, 64-bit.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into all lanes, 128-bit.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into all lanes, 128-bit.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move, 128-bit — slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7425 
// Vector load, 64 bits from memory. Address available at issue,
// loaded value written back in S5.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128 bits from memory.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64 bits to memory. Store data needed in S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7452 
// Vector store, 128 bits to memory. Store data needed in S2.
// The stored operand is a 128-bit vector: vecX, not vecD (the vecD
// here was a copy-paste from the 64-bit variant above; compare
// vload_reg_mem128, which correctly takes vecX).
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7461 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand needed earlier than src1
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): header comment says the result is generated in EX2
  // (and dst is written in EX2) but the ALU resource is booked at EX1
  // — confirm whether ALU : EX2 was intended here.
  ALU    : EX1;
%}
7527 
// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write); // no operands, so result available a stage earlier
  INS01  : ISS;
  ALU    : EX1;
%}
7559 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write); // writes the flags register only
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7586 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read); // condition flags consumed in EX1
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7624 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR; // uses the multiply-accumulate unit
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7677 
//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7703 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read); // address operand needed at issue
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write); // loaded value available at writeback
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read); // index register needed at issue
  INS01  : ISS;
  LDST   : WR;
%}
7737 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);  // address needed at issue
  src    : EX2(read);  // store data not needed until EX2
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: 'dst' here is the index register of the address, read at issue.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7771 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read); // flags consumed by the branch unit in EX1
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7800 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10); // assume the expansion is ~10 instructions
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
7831 
// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7864 
7865 %}
7866 //----------INSTRUCTIONS-------------------------------------------------------
7867 //
7868 // match      -- States which machine-independent subtree may be replaced
7869 //               by this instruction.
7870 // ins_cost   -- The estimated cost of this instruction is used by instruction
7871 //               selection to identify a minimum cost tree of machine
7872 //               instructions that matches a tree of machine-independent
7873 //               instructions.
7874 // format     -- A string providing the disassembly for this instruction.
7875 //               The value of an instruction's operand may be inserted
7876 //               by referring to it with a '$' prefix.
7877 // opcode     -- Three instruction opcodes may be provided.  These are referred
7878 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7880 //               indicate the type of machine instruction, while secondary
7881 //               and tertiary are often used for prefix options or addressing
7882 //               modes.
7883 // ins_encode -- A list of encode classes with parameters. The encode class
7884 //               name must have been defined in an 'enc_class' specification
7885 //               in the encode section of the architecture description.
7886 
7887 // ============================================================================
7888 // Memory (Load/Store) Instructions
7889 
7890 // Load Instructions
7891 
// Load Byte (8 bit signed)
// The !needs_acquiring_load(n) predicate excludes loads that require
// acquire semantics; those match the ldar* forms in the volatile
// section further below.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L node; the load itself is its input
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7947 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8003 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (long)x & 0xFFFFFFFFL idiom: a plain 32-bit load
// zero-extends, so the AndL mask is absorbed.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): the "# int" annotation in the format below looks like
  // a copy-paste from loadI; the instruction loads a 64-bit long.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8059 
// Load Range
// Array-length loads are never volatile, so no acquiring predicate.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8128 
// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8156 
8157 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
8183 
// Load Pointer Constant
// May expand to a multi-instruction materialization, hence the
// higher cost than the immediate forms below.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8213 
// Load Pointer Constant One
// The format previously said "# NULL ptr" — a copy-paste from
// loadConP0 above; this instruction materializes the constant 1.

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8227 
// Load Poll Page Constant
// Uses adr/adrp-style PC-relative addressing rather than a mov.

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
8255 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8297 
// Load Packed Float Constant
// "Packed" constants are encodable directly in an fmov immediate,
// avoiding a constant-table load.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load the value from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
8328 
// Load Packed Double Constant
// Encodable directly in an fmov immediate; cheaper than a table load.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8341 
// Load Double Constant
// General case: load the value from the constant table.
// The format previously said "float=$con" — a copy-paste from
// loadConF; this instruction loads a double.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8358 
// Store Instructions

// Store CMS card-mark Immediate
// Card mark store where the StoreStore barrier can be elided.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// The !needs_releasing_store(n) predicate excludes stores that require
// release semantics; those are handled by the stlr forms elsewhere.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8405 
8406 
// Store Zero Byte
// The format previously read "strb rscractch2" (typo'd scratch
// register); aarch64_enc_strb0 stores the zero register, exactly as
// formatted in storeimmCM0 above.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8419 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Char/Short
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Integer
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8474 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): the "# int" annotations below look copy-pasted from
  // the 32-bit forms; these are 64-bit stores.
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8530 
// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer
// When both narrow-oop and narrow-klass bases are NULL, rheapbase
// holds zero and can be stored directly instead of materializing 0.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8559 
// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked
8607 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8621 
//  ---------------- volatile loads and stores ----------------
//
// These use load-acquire (ldar*) forms and therefore schedule on
// pipe_serial. They only accept an indirect (base-register) address.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8649 
8650 // Load Byte (8 bit unsigned)
8651 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8652 %{
8653   match(Set dst (LoadUB mem));
8654 
8655   ins_cost(VOLATILE_REF_COST);
8656   format %{ "ldarb  $dst, $mem\t# byte" %}
8657 
8658   ins_encode(aarch64_enc_ldarb(dst, mem));
8659 
8660   ins_pipe(pipe_serial);
8661 %}
8662 
8663 // Load Byte (8 bit unsigned) into long
8664 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8665 %{
8666   match(Set dst (ConvI2L (LoadUB mem)));
8667 
8668   ins_cost(VOLATILE_REF_COST);
8669   format %{ "ldarb  $dst, $mem\t# byte" %}
8670 
8671   ins_encode(aarch64_enc_ldarb(dst, mem));
8672 
8673   ins_pipe(pipe_serial);
8674 %}
8675 
8676 // Load Short (16 bit signed)
8677 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8678 %{
8679   match(Set dst (LoadS mem));
8680 
8681   ins_cost(VOLATILE_REF_COST);
8682   format %{ "ldarshw  $dst, $mem\t# short" %}
8683 
8684   ins_encode(aarch64_enc_ldarshw(dst, mem));
8685 
8686   ins_pipe(pipe_serial);
8687 %}
8688 
8689 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8690 %{
8691   match(Set dst (LoadUS mem));
8692 
8693   ins_cost(VOLATILE_REF_COST);
8694   format %{ "ldarhw  $dst, $mem\t# short" %}
8695 
8696   ins_encode(aarch64_enc_ldarhw(dst, mem));
8697 
8698   ins_pipe(pipe_serial);
8699 %}
8700 
8701 // Load Short/Char (16 bit unsigned) into long
8702 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8703 %{
8704   match(Set dst (ConvI2L (LoadUS mem)));
8705 
8706   ins_cost(VOLATILE_REF_COST);
8707   format %{ "ldarh  $dst, $mem\t# short" %}
8708 
8709   ins_encode(aarch64_enc_ldarh(dst, mem));
8710 
8711   ins_pipe(pipe_serial);
8712 %}
8713 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fix: the format previously printed "ldarh", but the encoding emits the
  // sign-extending ldarsh; keep the printed assembly in sync with the code.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8726 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The immL_32bits mask operand matches the AndL that zero-extends the
// loaded int; ldarw itself zero-extends, so no extra instruction is needed.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8752 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fix: format comment previously said "# int" for this 64-bit long load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8765 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// FP loads go through an integer load-acquire plus fmov inside the enc
// class (aarch64_enc_fldars) since there is no FP ldar form.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8817 
// Store Byte
// Volatile stores use store-release (stlr*) encodings with indirect-only
// addressing, mirroring the ldar* rules above.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8843 
// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Consistency fix: "Set mem(StoreI" was missing the space present in
  // every sibling rule ("Set mem (StoreX ...)"); no semantic change.
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8857 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fix: format comment previously said "# int" for this 64-bit long store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8870 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// FP release stores route through the aarch64_enc_fstlrs enc class
// (fmov to an integer register then stlr), as there is no FP stlr form.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8909 
8910 // TODO
8911 // implement storeImmF0 and storeFImmPacked
8912 
// Store Double
// Release-form 64-bit FP store, via aarch64_enc_fstlrd.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8925 
8926 //  ---------------- end of volatile loads and stores ----------------
8927 
8928 // ============================================================================
8929 // BSWAP Instructions
8930 
// Byte-reverse a 32-bit int (Integer.reverseBytes intrinsic).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a 64-bit long (Long.reverseBytes intrinsic).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse an unsigned short (Character.reverseBytes): rev16w swaps
// the two bytes of each halfword; the upper halfword is not consumed.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a signed short (Short.reverseBytes): rev16w then sbfmw
// (bits 0..15) to sign-extend the swapped halfword into the int result.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8984 
8985 // ============================================================================
8986 // Zero Count Instructions
8987 
// Count leading zeros of a 32-bit int via clzw.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit long via clz.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Trailing zeros = leading zeros of the bit-reversed value (rbitw + clzw);
// AArch64 has no direct count-trailing-zeros instruction.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// 64-bit variant of the rbit + clz trailing-zero idiom.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9039 
9040 //---------- Population Count Instructions -------------------------------------
9041 //
9042 
// Population count of a 32-bit int using the SIMD cnt/addv sequence:
// move to an FP/SIMD register, count bits per byte (cnt 8B), sum the
// byte counts (addv), and move the scalar result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (movw src,src zero-extends in place)
    // without declaring src TEMP/USE_KILL in effect() — verify the register
    // allocator tolerates this, or that the rewrite is value-preserving.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9064 
// Memory-operand form of popCountI: load the int straight into the SIMD
// temp (ldrs via loadStore helper), then the same cnt/addv/mov sequence.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9086 
// Note: Long.bitCount(long) returns an int.
// 64-bit popcount via the same SIMD cnt/addv idiom; no zero-extension
// step is needed since the full 64-bit value is moved into the D register.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form of popCountL: ldrd the long directly into the
// SIMD temp, then cnt/addv/mov as above.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9129 
9130 // ============================================================================
9131 // MemBar Instruction
9132 
// LoadFence: orders prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Zero-cost elision of MemBarAcquire when unnecessary_acquire(n) proves the
// preceding load already carries acquire semantics; emits only a comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Fallback MemBarAcquire: emits a real LoadLoad|LoadStore barrier.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock is always elided here; only a block comment is emitted.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9199 
// Zero-cost elision of MemBarRelease when unnecessary_release(n) proves the
// following store already carries release semantics.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Fallback MemBarRelease: emits a real LoadStore|StoreStore barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock is always elided here; only a block comment is emitted.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Zero-cost elision of MemBarVolatile when unnecessary_volatile(n) holds.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Fallback MemBarVolatile: emits a StoreLoad barrier. The high cost biases
// the matcher toward the elided rule whenever its predicate applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9280 
9281 // ============================================================================
9282 // Cast/Convert Instructions
9283 
// Reinterpret a long as a pointer; the mov is skipped when source and
// destination were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; same same-register elision as castX2P.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9326 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fix: the format printed the literal text "mov dst" (missing the '$'
  // substitution) and said "mov" although the encoding emits movw; now
  // consistent with convP2I above.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9342 
9343 
// Convert oop pointer into compressed form
// General case: the oop may be null, so encode_heap_oop is used and the
// flags register is killed (the macro may need a comparison internally).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null case: no null check needed inside the macro.
// NOTE(review): cr is a parameter here but carries no KILL effect — verify
// encode_heap_oop_not_null really leaves flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be not-null (or constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9397 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (never null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null). The in-place
// single-register overload is used when src and dst coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9435 
// Zero-size type-system-only nodes: CheckCastPP/CastPP/CastII change the
// ideal type of a value but emit no machine code.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9466 
9467 // ============================================================================
9468 // Atomic operation instructions
9469 //
9470 // Intel and SPARC both implement Ideal Node LoadPLocked and
9471 // Store{PIL}Conditional instructions using a normal load for the
9472 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9473 //
9474 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9475 // pair to lock object allocations from Eden space when not using
9476 // TLABs.
9477 //
9478 // There does not appear to be a Load{IL}Locked Ideal Node and the
9479 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9480 // and to use StoreIConditional only for 32-bit and StoreLConditional
9481 // only for 64-bit.
9482 //
9483 // We implement LoadPLocked and StorePLocked instructions using,
9484 // respectively the AArch64 hw load-exclusive and store-conditional
9485 // instructions. Whereas we must implement each of
9486 // Store{IL}Conditional using a CAS which employs a pair of
9487 // instructions comprising a load-exclusive followed by a
9488 // store-conditional.
9489 
9490 
9491 // Locked-load (linked load) of the current heap-top
9492 // used when updating the eden heap top
9493 // implemented using ldaxr on AArch64
9494 
9495 instruct loadPLocked(iRegPNoSp dst, indirect mem)
9496 %{
9497   match(Set dst (LoadPLocked mem));
9498 
9499   ins_cost(VOLATILE_REF_COST);
9500 
9501   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
9502 
9503   ins_encode(aarch64_enc_ldaxr(dst, mem));
9504 
9505   ins_pipe(pipe_serial);
9506 %}
9507 
9508 // Conditional-store of the updated heap-top.
9509 // Used during allocation of the shared heap.
9510 // Sets flag (EQ) on success.
9511 // implemented using stlxr on AArch64.
9512 
9513 instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
9514 %{
9515   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
9516 
9517   ins_cost(VOLATILE_REF_COST);
9518 
9519  // TODO
9520  // do we need to do a store-conditional release or can we just use a
9521  // plain store-conditional?
9522 
9523   format %{
9524     "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
9525     "cmpw rscratch1, zr\t# EQ on successful write"
9526   %}
9527 
9528   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
9529 
9530   ins_pipe(pipe_serial);
9531 %}
9532 

// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9572 
// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// Each rule below emits a cmpxchg* enc class followed by cset to
// materialize the boolean success result; flags are clobbered (KILL cr).
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9686 
9687 // alternative CompareAndSwapX when we are eliding barriers
9688 
// Acquiring variant of compareAndSwapI, selected when
// needs_acquiring_load_exclusive(n) holds, i.e. when the surrounding
// barriers are being elided (see comment above); the acquiring cmpxchgw
// encoding is used and the rule is costed at VOLATILE_REF_COST rather
// than 2x, so it is preferred over the plain rule when applicable.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9707 
// Acquiring variant of compareAndSwapL (see compareAndSwapIAcq); selected
// via needs_acquiring_load_exclusive(n) when barriers are being elided.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9726 
// Acquiring variant of compareAndSwapP (see compareAndSwapIAcq); selected
// via needs_acquiring_load_exclusive(n) when barriers are being elided.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9745 
// Acquiring variant of compareAndSwapN (see compareAndSwapIAcq); selected
// via needs_acquiring_load_exclusive(n) when barriers are being elided.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9764 
9765 
9766 // ---------------------------------------------------------------------
9767 
9768 
9769 // BEGIN This section of the file is automatically generated. Do not edit --------------
9770 
9771 // Sundry CAS operations.  Note that release is always true,
9772 // regardless of the memory ordering of the CAS.  This is because we
9773 // need the volatile case to be sequentially consistent but there is
9774 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9775 // can't check the type of memory ordering here, so we always emit a
9776 // STLXR.
9777 
9778 // This section is generated from aarch64_ad_cas.m4
9779 
9780 
9781 
// Strong CompareAndExchangeB: $res receives the value previously at $mem
// (not a success flag).  $oldval is zero-extended into rscratch2 for the
// byte-width comparison; the fetched byte is sign-extended into $res to
// give the signed byte result expected by the ideal node.  TEMP_DEF res
// because $res is written before all inputs are dead.
// FIX: format said "(byte, weak)" but the encoding passes /*weak*/ false —
// this is a strong CAS.  NOTE(review): this section is generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9798 
// Strong CompareAndExchangeS: $res receives the value previously at $mem.
// $oldval is zero-extended into rscratch2 for the halfword comparison;
// the fetched halfword is sign-extended into $res.
// FIX: format said "(short, weak)" but the encoding passes /*weak*/ false —
// this is a strong CAS.  NOTE(review): generated from aarch64_ad_cas.m4;
// apply the same fix there.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9815 
// Strong CompareAndExchangeI: $res receives the value previously at $mem.
// FIX: format said "(int, weak)" but the encoding passes /*weak*/ false —
// this is a strong CAS.  NOTE(review): generated from aarch64_ad_cas.m4;
// apply the same fix there.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9830 
// Strong CompareAndExchangeL: $res receives the value previously at $mem.
// FIX: format said "(long, weak)" but the encoding passes /*weak*/ false —
// this is a strong CAS.  NOTE(review): generated from aarch64_ad_cas.m4;
// apply the same fix there.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9845 
// Strong CompareAndExchangeN (narrow oop, word width): $res receives the
// value previously at $mem.
// FIX: format said "(narrow oop, weak)" but the encoding passes
// /*weak*/ false — this is a strong CAS.  NOTE(review): generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9860 
// Strong CompareAndExchangeP: $res receives the value previously at $mem.
// FIX: format said "(ptr, weak)" but the encoding passes /*weak*/ false —
// this is a strong CAS.  NOTE(review): generated from aarch64_ad_cas.m4;
// apply the same fix there.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9875 
// Weak CAS of a byte with boolean result: the weak cmpxchg may fail
// spuriously (/*weak*/ true), so only success/failure is produced — the
// fetched value is discarded (noreg) and $res is materialised from the EQ
// flag by csetw.  $oldval is zero-extended for the byte-width comparison.
// FIX: format showed "cmpxchg $res = $mem, ..." implying $res is the
// exchange result; it is not — it is set only by the csetw.
// NOTE(review): generated from aarch64_ad_cas.m4; apply the same fix there.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9893 
// Weak CAS of a short with boolean result (see weakCompareAndSwapB): the
// fetched value is discarded (noreg); $res comes from csetw on EQ.
// FIX: format showed "cmpxchg $res = $mem, ..." implying $res is the
// exchange result; it is not.  NOTE(review): generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9911 
// Weak CAS of an int with boolean result (see weakCompareAndSwapB): the
// fetched value is discarded (noreg); $res comes from csetw on EQ.
// FIX: format showed "cmpxchg $res = $mem, ..." implying $res is the
// exchange result; it is not.  NOTE(review): generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9928 
// Weak CAS of a long with boolean result (see weakCompareAndSwapB): the
// fetched value is discarded (noreg); $res comes from csetw on EQ.
// FIX: format showed "cmpxchg $res = $mem, ..." implying $res is the
// exchange result; it is not.  NOTE(review): generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9945 
// Weak CAS of a narrow oop with boolean result (see weakCompareAndSwapB):
// the fetched value is discarded (noreg); $res comes from csetw on EQ.
// FIX: format showed "cmpxchg $res = $mem, ..." implying $res is the
// exchange result; it is not.  NOTE(review): generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9962 
// Weak CAS of a pointer with boolean result (see weakCompareAndSwapB):
// the fetched value is discarded (noreg); $res comes from csetw on EQ.
// FIX: format showed "cmpxchg $res = $mem, ..." implying $res is the
// exchange result; it is not.  NOTE(review): generated from
// aarch64_ad_cas.m4; apply the same fix there.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9979 
9980 // END This section of the file is automatically generated. Do not edit --------------
9981 // ---------------------------------------------------------------------
9982 
// Atomic 32-bit exchange: store $newv to [$mem], return the previous
// word in $prev.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9991 
// Atomic 64-bit exchange: store $newv to [$mem], return the previous
// value in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10000 
// Atomic exchange of a narrow (compressed) oop — word width, so the
// 32-bit atomic_xchgw is used.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10009 
// Atomic exchange of a pointer (64-bit): store $newv to [$mem], return
// the previous value in $prev.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10018 
10019 
// Atomic fetch-and-add (64-bit, register increment): $newval receives the
// value fetched from [$mem] before the add.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10029 
// Variant of get_and_addL for when the fetched value is unused
// (result_not_used()): passes noreg as the destination and is costed one
// INSN_COST cheaper so it is preferred when applicable.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10040 
// Atomic fetch-and-add (64-bit) with an immediate increment
// (immLAddSub: valid add/sub immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10050 
// Immediate-increment variant of get_and_addL_no_res: fetched value
// unused (noreg), costed one INSN_COST cheaper.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10061 
// Atomic fetch-and-add (32-bit, register increment): $newval receives the
// value fetched from [$mem] before the add.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10071 
// Variant of get_and_addI for when the fetched value is unused
// (result_not_used()): noreg destination, costed one INSN_COST cheaper.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10082 
// Atomic fetch-and-add (32-bit) with an immediate increment
// (immIAddSub: valid add/sub immediate).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10092 
// Immediate-increment variant of get_and_addI_no_res: fetched value
// unused (noreg), costed one INSN_COST cheaper.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10103 
10104 // Manifest a CmpL result in an integer register.
10105 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Manifest a CmpL result in an integer register:
// $dst <- (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0).
// cmp sets flags; csetw gives 0/1 on NE; cnegw negates on LT to yield -1.
// FIX: removed a stale commented-out alternate format line (dead code).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10126 
// CmpL3 against an add/sub-encodable immediate: same -1/0/1 result as
// cmpL3_reg_reg.  A negative immediate is handled by adding its negation
// (adds) instead of subtracting, since subs takes an unsigned immediate.
// NOTE(review): -con cannot overflow here because immLAddSub only admits
// values encodable as add/sub immediates — presumed; verify the operand
// definition.
// FIX: corrected the misindented "if (con < 0)" line (was 5 spaces).
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10151 
10152 // ============================================================================
10153 // Conditional Move Instructions
10154 
10155 // n.b. we have identical rules for both a signed compare op (cmpOp)
10156 // and an unsigned compare op (cmpOpU). it would be nice if we could
10157 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
10162 // which throws a ShouldNotHappen. So, we have to provide two flavours
10163 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10164 
// Conditional move of an int, signed compare (cmpOp): a single cselw
// selects between $src1 and $src2 based on the condition code and the
// flags in cr.  See the section comment above for why signed and unsigned
// variants are separate rules.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10180 
// Unsigned-compare flavour of cmovI_reg_reg (cmpOpU / rFlagsRegU);
// identical encoding, different operand classes.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10196 
10197 // special cases where one arg is zero
10198 
10199 // n.b. this is selected in preference to the rule above because it
10200 // avoids loading constant 0 into a source register
10201 
10202 // TODO
10203 // we ought only to be able to cull one of these variants as the ideal
10204 // transforms ought always to order the zero consistently (to left/right?)
10205 
// cmovI with a constant-zero first source: the zero is matched as immI0
// so the zero register zr is used directly instead of loading 0 into a
// register (see comment above).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10221 
// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10237 
// cmovI with a constant-zero second source: zr is used directly in place
// of a register holding 0.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10253 
// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10269 
10270 // special case for creating a boolean 0 or 1
10271 
10272 // n.b. this is selected in preference to the rule above because it
10273 // avoids loading constants 0 and 1 into a source register
10274 
// Boolean materialisation: CMoveI choosing between constants 1 and 0 is
// encoded as a single "csincw dst, zr, zr, cond" (cond ? 0 : 0+1), which
// is cset with the negated condition — see the inline comment.  Avoids
// loading 0 and 1 into registers (see comment above).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10293 
// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10312 
// Conditional move of a long, signed compare: single 64-bit csel.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10328 
// Unsigned-compare flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10344 
10345 // special cases where one arg is zero
10346 
// cmovL with a constant-zero second source: zr used directly.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10362 
// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10378 
// cmovL with a constant-zero first source: zr used directly.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10394 
// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10410 
// Conditional move of a pointer, signed compare: single 64-bit csel.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10426 
// Unsigned-compare flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10442 
10443 // special cases where one arg is zero
10444 
// cmovP with a constant-null second source: zr used directly.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10460 
// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10476 
// cmovP with a constant-null first source: zr used directly.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10492 
// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10508 
// Conditional move of a narrow (compressed) oop, signed compare: single
// 32-bit cselw.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10524 
// Unsigned-compare flavour of cmovN_reg_reg (cmpOpU / rFlagsRegU).
// FIX: format comment said "# signed, compressed ptr" although this is
// the unsigned rule — now consistent with the other cmovU* rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10540 
10541 // special cases where one arg is zero
10542 
// cmovN with a constant-zero second source: zr used directly.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10558 
10559 instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
10560   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
10561 
10562   ins_cost(INSN_COST * 2);
10563   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}
10564 
10565   ins_encode %{
10566     __ cselw(as_Register($dst$$reg),
10567              zr,
10568              as_Register($src$$reg),
10569              (Assembler::Condition)$cmp$$cmpcode);
10570   %}
10571 
10572   ins_pipe(icond_reg);
10573 %}
10574 
10575 instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
10576   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
10577 
10578   ins_cost(INSN_COST * 2);
10579   format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}
10580 
10581   ins_encode %{
10582     __ cselw(as_Register($dst$$reg),
10583              as_Register($src$$reg),
10584              zr,
10585              (Assembler::Condition)$cmp$$cmpcode);
10586   %}
10587 
10588   ins_pipe(icond_reg);
10589 %}
10590 
10591 instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
10592   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
10593 
10594   ins_cost(INSN_COST * 2);
10595   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}
10596 
10597   ins_encode %{
10598     __ cselw(as_Register($dst$$reg),
10599              as_Register($src$$reg),
10600              zr,
10601              (Assembler::Condition)$cmp$$cmpcode);
10602   %}
10603 
10604   ins_pipe(icond_reg);
10605 %}
10606 
// Conditional move between two float registers using FCSEL (single
// precision), driven by signed integer condition flags.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// As above, but for unsigned condition flags (cmpOpU/rFlagsRegU).
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10642 
// Conditional move between two double registers using FCSEL (double
// precision), driven by signed integer condition flags.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Comment previously said "cmove float" - this is the double (CMoveD) rule.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10660 
// Conditional move between two double registers using FCSEL (double
// precision), driven by UNSIGNED condition flags (cmpOpU/rFlagsRegU).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Comment previously said "cmove float" - this is the double (CMoveD) rule.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10678 
10679 // ============================================================================
10680 // Arithmetic Instructions
10681 //
10682 
10683 // Integer Addition
10684 
10685 // TODO
10686 // these currently employ operations which do not set CR and hence are
10687 // not flagged as killing CR but we would like to isolate the cases
10688 // where we want to set flags from those where we don't. need to work
10689 // out how to do that.
10690 
10691 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10692   match(Set dst (AddI src1 src2));
10693 
10694   ins_cost(INSN_COST);
10695   format %{ "addw  $dst, $src1, $src2" %}
10696 
10697   ins_encode %{
10698     __ addw(as_Register($dst$$reg),
10699             as_Register($src1$$reg),
10700             as_Register($src2$$reg));
10701   %}
10702 
10703   ins_pipe(ialu_reg_reg);
10704 %}
10705 
10706 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
10707   match(Set dst (AddI src1 src2));
10708 
10709   ins_cost(INSN_COST);
10710   format %{ "addw $dst, $src1, $src2" %}
10711 
10712   // use opcode to indicate that this is an add not a sub
10713   opcode(0x0);
10714 
10715   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10716 
10717   ins_pipe(ialu_reg_imm);
10718 %}
10719 
10720 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
10721   match(Set dst (AddI (ConvL2I src1) src2));
10722 
10723   ins_cost(INSN_COST);
10724   format %{ "addw $dst, $src1, $src2" %}
10725 
10726   // use opcode to indicate that this is an add not a sub
10727   opcode(0x0);
10728 
10729   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10730 
10731   ins_pipe(ialu_reg_imm);
10732 %}
10733 
10734 // Pointer Addition
10735 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
10736   match(Set dst (AddP src1 src2));
10737 
10738   ins_cost(INSN_COST);
10739   format %{ "add $dst, $src1, $src2\t# ptr" %}
10740 
10741   ins_encode %{
10742     __ add(as_Register($dst$$reg),
10743            as_Register($src1$$reg),
10744            as_Register($src2$$reg));
10745   %}
10746 
10747   ins_pipe(ialu_reg_reg);
10748 %}
10749 
10750 instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
10751   match(Set dst (AddP src1 (ConvI2L src2)));
10752 
10753   ins_cost(1.9 * INSN_COST);
10754   format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
10755 
10756   ins_encode %{
10757     __ add(as_Register($dst$$reg),
10758            as_Register($src1$$reg),
10759            as_Register($src2$$reg), ext::sxtw);
10760   %}
10761 
10762   ins_pipe(ialu_reg_reg);
10763 %}
10764 
// Pointer plus a left-shifted long index, folded into a single LEA-style
// add with an LSL-scaled register operand.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus a shifted int index: the ConvI2L + LShiftL pair is folded
// into one sxtw-scaled address operand.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Left shift of a sign-extended int, implemented with a single SBFIZ.
// The inserted field width is capped at 32 since the source is an int.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10826 
// Long Addition
// 64-bit register-register add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10843 
// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Integer Subtraction
// 32-bit register-register subtract (SUBW).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit register-register subtract.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10906 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed missing space between mnemonic and first operand ("sub$dst").
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10921 
// Integer Negation (special case for sub)

// dst = 0 - src, 32-bit (NEGW).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// dst = 0 - src, 64-bit (NEG).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer Multiply

// 32-bit multiply (MULW).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64 signed multiply: MulL of two ConvI2L inputs collapses to a
// single SMULL.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply (MUL), low 64 bits of the product.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11002 
// High 64 bits of a signed 64x64 multiply (SMULH).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed stray trailing comma that preceded the "# mulhi" annotation.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11018 
11019 // Combined Integer Multiply & Add/Sub
11020 
// 32-bit multiply-add: dst = src3 + src1 * src2 (MADDW).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now matches the emitted 32-bit form (maddw), consistent with
  // addI/mulI which print addw/mulw.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11036 
// 32-bit multiply-subtract: dst = src3 - src1 * src2 (MSUBW).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now matches the emitted 32-bit form (msubw), consistent with
  // subI which prints subw.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11052 
// Combined Long Multiply & Add/Sub

// 64-bit multiply-add: dst = src3 + src1 * src2 (MADD).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// 64-bit multiply-subtract: dst = src3 - src1 * src2 (MSUB).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11086 
// Integer Divide

// 32-bit signed divide (SDIVW), via shared encoding.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 collapses to a single logical shift right by 31,
// extracting the sign bit as 0/1.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + ((src >> 31) >>> 31) folded into one add with an LSR #31 shifted
// operand (rounding adjustment used for division by a power of two).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

// 64-bit signed divide (SDIV), via shared encoding.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit analogue of signExtract: (x >> 63) >>> 63 becomes LSR #63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11144 
// 64-bit analogue of div2Round: src + ((src >> 63) >>> 63) folded into one
// add with an LSR #63 shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Format previously omitted the shift operator; now matches div2Round.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11158 
11159 // Integer Remainder
11160 
// 32-bit remainder: sdivw into a scratch register, then msubw to recover
// the remainder (dst = src1 - (src1 / src2) * src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed malformed format string: stray "(" and missing operand spacing.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11171 
11172 // Long Remainder
11173 
// 64-bit remainder: sdiv into a scratch register, then msub to recover
// the remainder (dst = src1 - (src1 / src2) * src2).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed malformed format string: stray "(", and "\n" -> "\n\t" to match modI.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11184 
// Integer Shifts

// Shift Left Register
// 32-bit variable left shift (LSLVW); shift amount taken from a register.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Immediate shift amount is masked to 0..31, matching Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 32-bit variable logical right shift (LSRVW).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// 32-bit variable arithmetic right shift (ASRVW).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11282 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts

// Shift Left Register
// 64-bit variable left shift (LSLV).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Immediate shift amount is masked to 0..63, matching Java long shifts.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 64-bit variable logical right shift (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// 64-bit variable arithmetic right shift (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11399 
// BEGIN This section of the file is automatically generated. Do not edit --------------

// NOTE(review): comments below annotate generated rules and will be lost if
// this section is regenerated; fold them into the generator if kept.

// dst = ~src1 (XorL with -1), via EON with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (XorI with -1), via EONW with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// dst = src1 & ~src2 (AND with inverted operand), via BICW.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (64-bit), via BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2, via ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (64-bit), via ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (XOR of an XOR with -1), via EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (64-bit), via EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11536 
// NOTE(review): generated rules; dst = src1 & ~(src2 >>> src3), via BICW
// with an LSR-shifted second operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3) (64-bit), via BIC with LSR-shifted operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3), via BICW with ASR-shifted operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (64-bit), via BIC with ASR-shifted operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11608 
11609 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11610                          iRegIorL2I src1, iRegIorL2I src2,
11611                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11612   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11613   ins_cost(1.9 * INSN_COST);
11614   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11615 
11616   ins_encode %{
11617     __ bicw(as_Register($dst$$reg),
11618               as_Register($src1$$reg),
11619               as_Register($src2$$reg),
11620               Assembler::LSL,
11621               $src3$$constant & 0x1f);
11622   %}
11623 
11624   ins_pipe(ialu_reg_reg_shift);
11625 %}
11626 
11627 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11628                          iRegL src1, iRegL src2,
11629                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11630   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11631   ins_cost(1.9 * INSN_COST);
11632   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11633 
11634   ins_encode %{
11635     __ bic(as_Register($dst$$reg),
11636               as_Register($src1$$reg),
11637               as_Register($src2$$reg),
11638               Assembler::LSL,
11639               $src3$$constant & 0x3f);
11640   %}
11641 
11642   ins_pipe(ialu_reg_reg_shift);
11643 %}
11644 
// ---- XorX_reg_XShift_not_reg family ---------------------------------------
// Fold "~(src1 ^ (src2 shift src3))" into a single EON (exclusive-OR-NOT)
// with a shifted-register second operand.  The match tree is
// src4 ^ ((src2 shift src3) ^ src1) with src4 == -1, which is the IR form
// of the bitwise NOT of the XOR.  The shift count is masked to the operand
// width (0x1f for 32-bit "w" forms, 0x3f for 64-bit forms).

// int: ~(src1 ^ (src2 >>> src3))  ==>  eonw dst, src1, src2, LSR src3
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: ~(src1 ^ (src2 >>> src3))  ==>  eon dst, src1, src2, LSR src3
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: ~(src1 ^ (src2 >> src3))  ==>  eonw dst, src1, src2, ASR src3
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: ~(src1 ^ (src2 >> src3))  ==>  eon dst, src1, src2, ASR src3
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: ~(src1 ^ (src2 << src3))  ==>  eonw dst, src1, src2, LSL src3
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: ~(src1 ^ (src2 << src3))  ==>  eon dst, src1, src2, LSL src3
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11752 
// ---- OrX_reg_XShift_not_reg family ----------------------------------------
// Fold "src1 | ~(src2 shift src3)" into a single ORN (or-not) instruction
// with a shifted-register second operand.  As above, the XOR with the
// all-ones immediate (src4 == -1) is the IR form of bitwise NOT, and the
// shift count is masked to the operand width (0x1f / 0x3f).

// int: src1 | ~(src2 >>> src3)  ==>  ornw dst, src1, src2, LSR src3
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | ~(src2 >>> src3)  ==>  orn dst, src1, src2, LSR src3
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 | ~(src2 >> src3)  ==>  ornw dst, src1, src2, ASR src3
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | ~(src2 >> src3)  ==>  orn dst, src1, src2, ASR src3
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 | ~(src2 << src3)  ==>  ornw dst, src1, src2, LSL src3
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | ~(src2 << src3)  ==>  orn dst, src1, src2, LSL src3
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11860 
// ---- AndX_reg_XShift_reg family -------------------------------------------
// Fold a constant shift of the second operand into the AND instruction's
// shifted-register form, so "src1 & (src2 shift src3)" is one instruction.
// The shift count is masked to the operand width (0x1f for 32-bit "w"
// forms, 0x3f for 64-bit forms), matching JVM shift semantics.  The 64-bit
// assembler method is named andr ("and" is a C++ alternative operator
// token and cannot be used as a method name).

// int: src1 & (src2 >>> src3)  ==>  andw dst, src1, src2, LSR src3
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 & (src2 >>> src3)  ==>  and dst, src1, src2, LSR src3
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 & (src2 >> src3)  ==>  andw dst, src1, src2, ASR src3
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 & (src2 >> src3)  ==>  and dst, src1, src2, ASR src3
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 & (src2 << src3)  ==>  andw dst, src1, src2, LSL src3
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 & (src2 << src3)  ==>  and dst, src1, src2, LSL src3
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11974 
// ---- XorX_reg_XShift_reg family -------------------------------------------
// Fold a constant shift of the second operand into the EOR instruction's
// shifted-register form, so "src1 ^ (src2 shift src3)" is one instruction.
// Shift counts are masked to the operand width (0x1f / 0x3f).

// int: src1 ^ (src2 >>> src3)  ==>  eorw dst, src1, src2, LSR src3
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 ^ (src2 >>> src3)  ==>  eor dst, src1, src2, LSR src3
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 ^ (src2 >> src3)  ==>  eorw dst, src1, src2, ASR src3
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 ^ (src2 >> src3)  ==>  eor dst, src1, src2, ASR src3
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 ^ (src2 << src3)  ==>  eorw dst, src1, src2, LSL src3
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 ^ (src2 << src3)  ==>  eor dst, src1, src2, LSL src3
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12088 
// ---- OrX_reg_XShift_reg family --------------------------------------------
// Fold a constant shift of the second operand into the ORR instruction's
// shifted-register form, so "src1 | (src2 shift src3)" is one instruction.
// Shift counts are masked to the operand width (0x1f / 0x3f).

// int: src1 | (src2 >>> src3)  ==>  orrw dst, src1, src2, LSR src3
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | (src2 >>> src3)  ==>  orr dst, src1, src2, LSR src3
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 | (src2 >> src3)  ==>  orrw dst, src1, src2, ASR src3
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | (src2 >> src3)  ==>  orr dst, src1, src2, ASR src3
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 | (src2 << src3)  ==>  orrw dst, src1, src2, LSL src3
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | (src2 << src3)  ==>  orr dst, src1, src2, LSL src3
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12202 
// ---- AddX_reg_XShift_reg family -------------------------------------------
// Fold a constant shift of the second operand into the ADD instruction's
// shifted-register form, so "src1 + (src2 shift src3)" is one instruction.
// Shift counts are masked to the operand width (0x1f / 0x3f).

// int: src1 + (src2 >>> src3)  ==>  addw dst, src1, src2, LSR src3
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 + (src2 >>> src3)  ==>  add dst, src1, src2, LSR src3
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 + (src2 >> src3)  ==>  addw dst, src1, src2, ASR src3
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 + (src2 >> src3)  ==>  add dst, src1, src2, ASR src3
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 + (src2 << src3)  ==>  addw dst, src1, src2, LSL src3
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 + (src2 << src3)  ==>  add dst, src1, src2, LSL src3
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12316 
// ---- SubX_reg_XShift_reg family -------------------------------------------
// Fold a constant shift of the second operand into the SUB instruction's
// shifted-register form, so "src1 - (src2 shift src3)" is one instruction.
// Shift counts are masked to the operand width (0x1f / 0x3f).

// int: src1 - (src2 >>> src3)  ==>  subw dst, src1, src2, LSR src3
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 - (src2 >>> src3)  ==>  sub dst, src1, src2, LSR src3
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 - (src2 >> src3)  ==>  subw dst, src1, src2, ASR src3
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 - (src2 >> src3)  ==>  sub dst, src1, src2, ASR src3
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 - (src2 << src3)  ==>  subw dst, src1, src2, LSL src3
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 - (src2 << src3)  ==>  sub dst, src1, src2, LSL src3
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12430 
12431 
12432 
12433 // Shift Left followed by Shift Right.
12434 // This idiom is used by the compiler for the i2b bytecode etc.
// long: (src << lshift) >> rshift (arithmetic)  ==>  sbfm dst, src, r, s
// Encoded as SBFM with immr = (rshift - lshift) & 63 and
// imms = 63 - lshift: for rshift >= lshift this is the sign-extending
// bitfield-extract (SBFX) form; for rshift < lshift it is the
// bitfield-insert-in-zero (SBFIZ) form.  Either way it matches the
// shift-left-then-arithmetic-shift-right idiom exactly.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12455 
12456 // Shift Left followed by Shift Right.
12457 // This idiom is used by the compiler for the i2b bytecode etc.
// int: (src << lshift) >> rshift (arithmetic)  ==>  sbfmw dst, src, r, s
// 32-bit version of sbfmL: immr = (rshift - lshift) & 31,
// imms = 31 - lshift.  This is the pattern javac emits for narrowing
// conversions such as i2b/i2s (e.g. (x << 24) >> 24).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12478 
12479 // Shift Left followed by Shift Right.
12480 // This idiom is used by the compiler for the i2b bytecode etc.
// long: (src << lshift) >>> rshift (logical)  ==>  ubfm dst, src, r, s
// Unsigned counterpart of sbfmL: UBFM with immr = (rshift - lshift) & 63
// and imms = 63 - lshift zero-extends instead of sign-extending
// (UBFX / UBFIZ forms).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12501 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned bitfield move, 32-bit version: (src << lshift) >>> rshift
// collapses to a single ubfmw.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // Same immr/imms arithmetic as sbfmwI, but zero-extending.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// Matches (src >>> rshift) & mask where mask is a contiguous low-bit
// mask (guaranteed by immI_bitmask), i.e. mask+1 is a power of two, so
// the whole expression is a single unsigned bitfield extract (ubfxw).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width = log2(mask+1); exact because mask+1 is a power of 2.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// Matches (src >>> rshift) & mask where mask is a contiguous low-bit
// long mask (guaranteed by immL_bitmask), emitting a single unsigned
// bitfield extract (ubfx).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Use the long-aware log2: the mask is 64-bit here, and the file
    // already uses exact_log2_long for the same mask+1 computation in
    // the ubfizL predicate.
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12556 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The zero-extension to long comes for free: ubfx already clears the
// bits above the extracted field.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width = log2(mask+1); mask is a 31-bit int bitmask here.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12574 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  // Field must stay inside 32 bits: lshift <= 31 and width + lshift <= 32.
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // Field width = log2(mask+1); exact because mask+1 is a power of 2.
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  // Field must stay inside 64 bits: lshift <= 63 and width + lshift <= 64.
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // Use the long-aware log2, matching the predicate above, since the
    // mask is a 64-bit immL_bitmask.
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12613 
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  // Field must stay inside 32 bits so the implicit zero-extension of
  // ConvI2L is preserved: lshift <= 31 and width + lshift <= 32.
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // Field width = log2(mask+1); mask is a 31-bit int bitmask here.
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12632 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64 is a
// single extr (extract register pair) instruction.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must be complementary mod 64 for extr to apply.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12649 
// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 32 is a
// single extrw (32-bit extract register pair) instruction.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must be complementary mod 32 for extrw to apply.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the mnemonic actually emitted (extrw, not extr).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12664 
// Same as extrOrL but with Add combining the halves: when the shift
// counts are complementary the shifted fields cannot overlap, so Add
// and Or produce identical results and both map to extr.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must be complementary mod 64 for extr to apply.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12679 
// Same as extrOrI but with Add combining the halves: when the shift
// counts are complementary the shifted fields cannot overlap, so Add
// and Or produce identical results and both map to extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must be complementary mod 32 for extrw to apply.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the mnemonic actually emitted (extrw, not extr).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12694 
12695 
// rol expander

// Rotate-left is synthesized as rotate-right by the negated count:
// subw computes -shift (mod 32 is enough; rorv uses count mod 64).
// Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// 32-bit variant of the rotate-left expander above. Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12727 
// Matchers for the rotate-left idiom (x << s) | (x >>> (c - s)); both
// c == 64 and c == 0 forms appear because the subtrahend is taken mod
// the operand width.  They expand into the rol expanders above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12763 
// ror expander

// Rotate-right maps directly onto a single rorv; no scratch register
// needed, hence the lower cost than the rol expanders.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant of the rotate-right expander above.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12793 
// Matchers for the rotate-right idiom (x >>> s) | (x << (c - s)); both
// c == 64 and c == 0 forms appear because the subtrahend is taken mod
// the operand width.  They expand into the ror expanders above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12829 
// Add/subtract (extended)

// long + (long)int folds the sign extension into the add's sxtw
// extend operand.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// long - (long)int folds the sign extension into the sub's sxtw
// extend operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12857 
12858 
// src1 +/- extend(src2): the shift-pair extension idiom
// (src2 << k) >> k (signed) or (src2 << k) >>> k (unsigned) is folded
// into the add's extend operand (sxtb/sxth/sxtw/uxtb per the shift
// constant k = 24/16/32/56/48...).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12949 
12950 
// src1 + zero_extend(src2): the mask form of zero extension
// (src2 & 0xff / 0xffff / 0xffffffff) is folded into the add's
// uxtb/uxth/uxtw extend operand.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13015 
// src1 - zero_extend(src2): subtraction counterparts of the
// add-with-mask forms above, using the uxtb/uxth/uxtw extend operand.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13080 
13081 
// src1 +/- (sign_extend(src2) << lshift2): the shift-pair extension
// idiom followed by a left shift maps onto the extended-register form
// with a shift amount (immIExt limits lshift2 to the range the
// instruction accepts).
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13159 
// 32-bit counterparts of the extended-register-with-shift forms above.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13211 
13212 
// long +/- ((long)int << lshift): ConvI2L followed by a left shift
// folds into the sxtw extend operand with a shift amount.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
13238 
13239 
// src1 +/- (zero_extend(src2) << lshift): the mask form of zero
// extension followed by a left shift folds into the uxtb/uxth/uxtw
// extend operand with a shift amount.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13317 
// 32-bit counterparts: src1 +/- ((src2 & mask) << lshift) folded into
// the addw/subw uxtb/uxth extend operand with a shift amount.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13356 
13357 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
13358 %{
13359   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13360   ins_cost(1.9 * INSN_COST);
13361   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
13362 
13363    ins_encode %{
13364      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13365             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
13366    %}
13367   ins_pipe(ialu_reg_reg_shift);
13368 %}
13369 // END This section of the file is automatically generated. Do not edit --------------
13370 
13371 // ============================================================================
13372 // Floating Point Arithmetic Instructions
13373 
// dst = src1 + src2, single precision (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 + src2, double precision (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 - src2, single precision (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 - src2, double precision (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 * src2, single precision (FMULS).
// Slightly higher cost than add/sub to bias the matcher.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 * src2, double precision (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13463 
// Fused multiply-add family. All forms require UseFMA because they
// round only once, which differs from separate mul + add.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the "zero" operand is referenced by neither match rule nor
// encoding — presumably historical; confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): "zero" operand unused here as well — see mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13604 
13605 
// dst = src1 / src2, single precision (FDIVS). High cost reflects the
// long latency of hardware divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// dst = src1 / src2, double precision (FDIVD); costlier than the single
// precision form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13635 
// dst = -src, single precision.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // The emitted instruction is FNEGS; format previously said "fneg",
  // inconsistent with negD_reg_reg's "fnegd". Format strings only affect
  // disassembly/debug output, never the generated code.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13649 
// dst = -src, double precision (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// dst = |src|, single precision (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// dst = |src|, double precision (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13689 
// dst = sqrt(src), double precision (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Double-precision op: schedule on the double divide/sqrt pipe.
  // Was fp_div_s, which had been swapped with sqrtF_reg's pipe class
  // (compare divF_reg_reg -> fp_div_s / divD_reg_reg -> fp_div_d).
  ins_pipe(fp_div_d);
%}
13702 
// dst = sqrtf(src), single precision. The ideal graph expresses float
// sqrt as widen -> double sqrt -> narrow; a single FSQRTS computes the
// same correctly-rounded result, so the whole subtree collapses here.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Single-precision op: schedule on the single divide/sqrt pipe.
  // Was fp_div_d, which had been swapped with sqrtD_reg's pipe class.
  ins_pipe(fp_div_s);
%}
13715 
13716 // ============================================================================
13717 // Logical Instructions
13718 
13719 // Integer Logical Instructions
13720 
13721 // And Instructions
13722 
13723 
// dst = src1 & src2, 32-bit (ANDW).
// NOTE(review): the cr operand is listed but no effect() is declared and
// ANDW does not set flags — presumably historical; confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13738 
// dst = src1 & src2, 32-bit, with a valid logical-immediate mask.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Was "andsw": the encoding emits ANDW, which does not set flags
  // (ANDS would). Format strings only affect debug output.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13753 
// Or Instructions

// dst = src1 | src2, 32-bit (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | src2, 32-bit, logical-immediate form.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2, 32-bit (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ src2, 32-bit, logical-immediate form.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13817 
13818 // Long Logical Instructions
13819 // TODO
13820 
// dst = src1 & src2, 64-bit (AND).
// All six long logical format strings previously said "# int"; corrected
// to "# long" (debug output only, no codegen change).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & src2, 64-bit, logical-immediate form.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// dst = src1 | src2, 64-bit (ORR).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | src2, 64-bit, logical-immediate form.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2, 64-bit (EOR).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ src2, 64-bit, logical-immediate form.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13914 
// Sign-extend int to long: SBFM with imms=31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: the AndL with a 0xFFFFFFFF mask folds into a
// single UBFM (UXTW-like) instead of sxtw + and.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Convert int to boolean: dst = (src != 0) ? 1 : 0 via compare + cset.
// Clobbers the flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Convert pointer to boolean: dst = (src != NULL) ? 1 : 0, using the
// 64-bit compare. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13989 
// Floating point <-> integer conversions. fcvtzs* truncate toward zero
// (Java semantics for (int)/(long) casts of saturating NaN/overflow are
// handled by these instructions' saturating behavior); scvtf* are signed
// integer -> FP conversions.

// double -> float narrowing.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening (exact).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, truncating.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, truncating.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, truncating.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, truncating.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double (exact).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14119 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret the raw bits (Float.floatToRawIntBits etc.); no
// value conversion is performed.

// Load float stack slot bits into a GP register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load int stack slot bits into an FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load double stack slot bits into a GP register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load long stack slot bits into an FP register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store FP register bits to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store GP register bits to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14229 
// Store FP register bits to a long stack slot (raw bit move, no
// conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order fixed: previously printed "strd $dst, $src", reversed
  // relative to the store actually emitted and to every sibling
  // Move*_reg_stack format ("str* $src, $dst"). Debug output only.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14247 
// Store GP register bits to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Register-to-register bit moves via FMOV (no memory round trip).

// FP single -> GP 32-bit.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// GP 32-bit -> FP single.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// FP double -> GP 64-bit.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// GP 64-bit -> FP double.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14337 
14338 // ============================================================================
14339 // clearing of an array
14340 
14341 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14342 %{
14343   match(Set dummy (ClearArray cnt base));
14344   effect(USE_KILL cnt, USE_KILL base);
14345 
14346   ins_cost(4 * INSN_COST);
14347   format %{ "ClearArray $cnt, $base" %}
14348 
14349   ins_encode %{
14350     __ zero_words($base$$Register, $cnt$$Register);
14351   %}
14352 
14353   ins_pipe(pipe_class_memory);
14354 %}
14355 
14356 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14357 %{
14358   predicate((u_int64_t)n->in(2)->get_long()
14359             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
14360   match(Set dummy (ClearArray cnt base));
14361   effect(USE_KILL base);
14362 
14363   ins_cost(4 * INSN_COST);
14364   format %{ "ClearArray $cnt, $base" %}
14365 
14366   ins_encode %{
14367     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
14368   %}
14369 
14370   ins_pipe(pipe_class_memory);
14371 %}
14372 
14373 // ============================================================================
14374 // Overflow Math Instructions
14375 
14376 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14377 %{
14378   match(Set cr (OverflowAddI op1 op2));
14379 
14380   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14381   ins_cost(INSN_COST);
14382   ins_encode %{
14383     __ cmnw($op1$$Register, $op2$$Register);
14384   %}
14385 
14386   ins_pipe(icmp_reg_reg);
14387 %}
14388 
14389 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14390 %{
14391   match(Set cr (OverflowAddI op1 op2));
14392 
14393   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14394   ins_cost(INSN_COST);
14395   ins_encode %{
14396     __ cmnw($op1$$Register, $op2$$constant);
14397   %}
14398 
14399   ins_pipe(icmp_reg_imm);
14400 %}
14401 
14402 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14403 %{
14404   match(Set cr (OverflowAddL op1 op2));
14405 
14406   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14407   ins_cost(INSN_COST);
14408   ins_encode %{
14409     __ cmn($op1$$Register, $op2$$Register);
14410   %}
14411 
14412   ins_pipe(icmp_reg_reg);
14413 %}
14414 
14415 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14416 %{
14417   match(Set cr (OverflowAddL op1 op2));
14418 
14419   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14420   ins_cost(INSN_COST);
14421   ins_encode %{
14422     __ cmn($op1$$Register, $op2$$constant);
14423   %}
14424 
14425   ins_pipe(icmp_reg_imm);
14426 %}
14427 
// Int subtract-overflow check: cmpw (32-bit subs to zr) sets flags for VS/VC.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14440 
// Int subtract-overflow check, register + add/sub-encodable immediate variant.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14453 
// Long subtract-overflow check: cmp (64-bit subs to zr) sets flags for VS/VC.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14466 
// Long subtract-overflow check, immediate variant.  subs(zr, rn, imm) is
// exactly the cmp-immediate idiom; it is spelled out here rather than
// using a cmp() helper.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14479 
// Int negation-overflow check (0 - op1): flags from cmpw zr, op1 let a
// VS test fire only for op1 == min_jint.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14492 
// Long negation-overflow check (0 - op1): flags from cmp zr, op1 let a
// VS test fire only for op1 == min_jlong.
//
// Fix: the zero input of OverflowSubL is a *long* constant (ConL), so the
// operand must be immL0.  The previous immI0 operand matches only ConI and
// could never match here, leaving this rule dead and forcing the generic
// overflowSubL_reg_reg form to be selected instead.  (The x86_64 port's
// overflowNegL rule likewise uses its long-zero operand.)
instruct overflowNegL_reg(rFlagsReg cr, immL0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14505 
// Int multiply-overflow check producing a flags result.  smull gives the
// exact 64-bit product; comparing it with its own sign-extended low word
// yields NE iff the product does not fit in 32 bits.  The NE result is
// then converted into the V flag (cmpw 0x80000000, #1 sets VS) so generic
// overflow/no_overflow cmpOps can test it.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14526 
// Fused int multiply-overflow check + branch.  When the If only tests
// overflow/no_overflow (see predicate) we can branch directly on the
// NE/EQ outcome of the sign-extension compare, skipping the 2-insn
// flag materialization that overflowMulI_reg needs.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // VS (overflow requested) maps to NE of the compare above; VC maps to EQ.
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14548 
// Long multiply-overflow check producing a flags result.  mul/smulh give
// the full 128-bit product; the high half must equal the sign extension
// (ASR #63) of the low half or the product overflowed.  As in
// overflowMulI_reg, the NE outcome is converted into the V flag.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14571 
// Fused long multiply-overflow check + branch; same NE/EQ shortcut as
// overflowMulI_reg_branch, applicable only when the If tests
// overflow/no_overflow.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14595 
14596 // ============================================================================
14597 // Compare Instructions
14598 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14612 
// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14626 
// Signed int compare against an immediate encodable in an add/sub instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14640 
// Signed int compare against an arbitrary immediate (costs more: the
// constant may need to be materialized first, hence INSN_COST * 2).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14654 
14655 // Unsigned compare Instructions; really, same as signed compare
14656 // except it should only be used to feed an If or a CMovI which takes a
14657 // cmpOpU.
14658 
// Unsigned int compare, register-register.  Same cmpw encoding as the
// signed form; only the flags register class (rFlagsRegU) differs so
// consumers use unsigned cmpOps.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14672 
// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14686 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14700 
// Unsigned int compare against an arbitrary immediate (may need a
// constant materialization, hence INSN_COST * 2).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14714 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14728 
// Signed long compare against constant zero.
// NOTE(review): the format text says "tst" but the encoding used is the
// cmp-with-add/sub-immediate encoder — verify against the definition of
// aarch64_enc_cmp_imm_addsub (outside this chunk) and consider aligning
// the mnemonic shown in disassembly.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14742 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14756 
// Signed long compare against an arbitrary immediate (may need a
// constant materialization, hence INSN_COST * 2).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14770 
// Unsigned long compare, register-register; encoding shared with the
// signed form, only the flags register class differs.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14784 
// Unsigned long compare against constant zero.
// NOTE(review): format text "tst" vs cmp-immediate encoder — same remark
// as compL_reg_immL0.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14798 
// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14812 
// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14826 
// Pointer compare, register-register (unsigned flags: pointers compare
// as unsigned values).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14840 
// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14854 
// Pointer null test: compare a pointer register against constant NULL.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
14868 
// Compressed-oop null test: compare a narrow-oop register against zero.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14882 
14883 // FP comparisons
14884 //
14885 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14886 // using normal cmpOp. See declaration of rFlagsReg for details.
14887 
// Float compare, register-register: fcmps sets the normal integer flags
// (see the FP comparisons note above this section).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14901 
// Float compare against constant +0.0 using the fcmp-with-zero form.
// Fix: drop the non-standard 'D' literal suffix — plain 0.0 is already a
// C++ double and is portable across compilers.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14915 // FROM HERE
14916 
// Double compare, register-register: fcmpd sets the normal integer flags.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14930 
// Double compare against constant +0.0 using the fcmp-with-zero form.
// Fix: drop the non-standard 'D' literal suffix — plain 0.0 is already a
// C++ double and is portable across compilers.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14944 
// Three-way float compare (CmpF3): produces -1/0/1 in $dst, with
// unordered treated as -1 (csinv gives -1 unless EQ; csneg flips the
// sign unless LT, so LT-or-unordered stays -1).
// Fixes: removed the unused 'Label done' / 'bind(done)' pair (nothing
// ever branched to it) and closed the unbalanced parenthesis in the
// format text.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14972 
// Three-way double compare (CmpD3): produces -1/0/1 in $dst, with
// unordered treated as -1 (same csinv/csneg idiom as compF3_reg_reg).
// Fixes: removed the unused 'Label done' / 'bind(done)' pair and closed
// the unbalanced parenthesis in the format text.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14999 
// Three-way float compare against +0.0 (CmpF3 with zero), -1/0/1 result,
// unordered treated as -1.
// Fixes: removed the unused 'Label done' / 'bind(done)' pair, closed the
// unbalanced parenthesis in the format text, and replaced the
// non-standard '0.0D' literal with plain 0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15026 
// Three-way double compare against +0.0 (CmpD3 with zero), -1/0/1 result,
// unordered treated as -1.
// Fixes: removed the unused 'Label done' / 'bind(done)' pair, closed the
// unbalanced parenthesis in the format text, and replaced the
// non-standard '0.0D' literal with plain 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15052 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw materializes 0/1 from LT,
// then subw zr - dst turns 1 into -1 (all-ones mask).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15073 
// CmpLTMask against zero: a single arithmetic shift right by 31
// broadcasts the sign bit, giving -1 for negative src and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15089 
15090 // ============================================================================
15091 // Max and Min
15092 
// Signed int minimum: compare then conditional-select src1 when LT,
// src2 otherwise.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15117 // FROM HERE
15118 
// Signed int maximum: compare then conditional-select src1 when GT,
// src2 otherwise.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15143 
15144 // ============================================================================
15145 // Branch Instructions
15146 
15147 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15161 
15162 // Conditional Near Branch
// Conditional near branch on signed condition codes.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15182 
15183 // Conditional Near Branch Unsigned
// Conditional near branch on unsigned condition codes.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15203 
15204 // Make use of CBZ and CBNZ.  These instructions, as well as being
15205 // shorter than (cmp; branch), have the additional benefit of not
15206 // killing the flags.
15207 
// Int compare-with-zero fused into cbzw/cbnzw; shorter than cmp+branch
// and does not clobber the flags (see section comment above).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15224 
// Long compare-with-zero fused into cbz/cbnz (64-bit variant).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15241 
// Pointer null-check fused into cbz/cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15258 
// Compressed-oop null-check fused into cbzw/cbnzw (32-bit register view).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15275 
// Null-check of a decoded narrow oop: test the still-encoded 32-bit
// value directly with cbzw/cbnzw, skipping the DecodeN.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15292 
// Unsigned int compare-with-zero fused into cbzw/cbnzw.  For unsigned
// comparisons against 0, eq and "below or same" (LS) both reduce to
// "is zero"; the remaining conditions reduce to "is not zero".
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15309 
// Unsigned long compare-with-zero fused into cbz/cbnz; same EQ/LS
// reduction as cmpUI_imm0_branch.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15326 
15327 // Test bit and Branch
15328 
15329 // Patterns for short (< 32KiB) variants
// Long sign test fused into a test-bit branch: (op1 < 0) is exactly
// "bit 63 set", so lt maps to tbnz (NE here) and ge to tbz (EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15345 
// Int sign test fused into a test-bit branch on bit 31 (the int sign bit).
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15361 
// Single-bit long test fused into tbz/tbnz: (op1 & (1<<k)) == 0 / != 0.
// Predicate requires the AND mask to be a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15378 
// Single-bit int test fused into tbz/tbnz; mask must be a power of two.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15395 
15396 // And far variants
// Far (out-of-tbz-range) variant of cmpL_branch_sign: tbr emits an
// inverted test around an unconditional branch when far==true.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15411 
// Far variant of cmpI_branch_sign (sign bit 31, far branch form).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15426 
// Far variant of cmpL_branch_bit (single-bit long test, far branch form).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15442 
// Far variant of cmpI_branch_bit (single-bit int test, far branch form).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15458 
15459 // Test bits
15460 
// Set flags from `(op1 & op2) cmp 0` (long) without materializing the AND
// result; op2 must be encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  // Only match when the mask fits the A64 logical-immediate encoding.
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15473 
// Set flags from `(op1 & op2) cmp 0` (int) without materializing the AND
// result; op2 must be encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  // Only match when the mask fits the A64 32-bit logical-immediate encoding.
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Print "tstw" to match the 32-bit instruction actually emitted below
  // (and the listing of the sibling cmpI_and_reg rule).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15486 
// Set flags from `(op1 & op2) cmp 0` (long) with a register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15497 
// Set flags from `(op1 & op2) cmp 0` (int) with a register mask.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15508 
15509 
15510 // Conditional Far Branch
15511 // Conditional Far Branch Unsigned
15512 // TODO: fixme
15513 
15514 // counted loop end branch near
// Conditional branch closing a counted loop (signed compare).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15530 
15531 // counted loop end branch near Unsigned
// Conditional branch closing a counted loop (unsigned compare).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15547 
15548 // counted loop end branch far
15549 // counted loop end branch far unsigned
15550 // TODO: fixme
15551 
15552 // ============================================================================
15553 // inlined locking and unlocking
15554 
// Inlined monitor-enter fast path; sets flags for the caller to test
// and falls back to the runtime on contention (inside the encoding).
// tmp/tmp2 are scratch registers clobbered by the lock sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15569 
// Inlined monitor-exit fast path; counterpart of cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15582 
15583 
15584 // ============================================================================
15585 // Safepoint Instructions
15586 
15587 // TODO
15588 // provide a near and far version of this code
15589 
// Safepoint poll: load from the polling page; the load faults when the
// page is protected, diverting the thread into the safepoint handler.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15602 
15603 
15604 // ============================================================================
15605 // Procedure Call/Return Instructions
15606 
15607 // Call Java Static Instruction
15608 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15624 
15625 // TO HERE
15626 
15627 // Call Java Dynamic Instruction
// Call to a dynamically-bound Java method (dispatched via inline cache).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15643 
15644 // Call Runtime Instruction
15645 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15660 
15661 // Call Runtime Instruction
15662 
// Leaf runtime call (no safepoint/oop-map bookkeeping in the callee);
// uses the same encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15677 
15678 // Call Runtime Instruction
15679 
// Leaf runtime call that does not use floating-point arguments/results;
// same encoding as the other runtime call variants.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15694 
15695 // Tail Call; Jump from runtime stub to Java code.
15696 // Also known as an 'interprocedural jump'.
15697 // Target of jump will eventually return to caller.
15698 // TailJump below removes the return address.
// Indirect tail call: jump (not call) to compiled Java code, with the
// method oop carried in the inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15711 
// Indirect tail jump used for exception forwarding: the exception oop
// is pinned in r0 and the return address is removed (see TailCall note).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15724 
15725 // Create exception oop: created by stack-crawling runtime code.
15726 // Created exception is now available to this handler, and is setup
15727 // just prior to jumping to this handler. No code emitted.
15728 // TODO check
15729 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the exception oop (placed in r0 by stack-crawling runtime code)
// to a node; purely a register-allocation artifact, emits no code.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15742 
15743 // Rethrow exception: The exception oop will come in the first
15744 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop is
// already in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15755 
15756 
15757 // Return Instruction
15758 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already reloaded lr during frame pop.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15769 
15770 // Die now.
// Halt: emit a trapping instruction so execution cannot fall through.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    // (0xdead alone is the immediate used for not-entrant nmethod traps.)
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15785 
15786 // ============================================================================
15787 // Partial Subtype Check
15788 //
// Linear scan of the secondary-superklass array for an instance of the
// superklass.  Set a hidden
15790 // internal cache on a hit (cache is checked with exposed code in
15791 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15792 // encoding ALSO sets flags.
15793 
// Partial subtype check: slow-path scan of the secondary-supers array
// (fast-path cache checks are emitted inline by gen_subtype_check()).
// Result is zero on a hit, non-zero on a miss; flags are set as well.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15808 
// Variant matched when only the flags of (PartialSubtypeCheck == 0) are
// consumed; the result register need not be zeroed on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15823 
// String compare, both operands UTF-16 (UU); no vector temps needed.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // The effect clause kills both tmp1 and tmp2 — list both in the
  // disassembly comment (previously only $tmp1 was shown).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15841 
// String compare, both operands Latin-1 (LL); no vector temps needed.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // The effect clause kills both tmp1 and tmp2 — list both in the
  // disassembly comment (previously only $tmp1 was shown).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15858 
// String compare, UTF-16 vs Latin-1 (UL); the mixed-encoding path also
// needs three vector temps (v0-v2) for inflation during the compare.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15878 
// String compare, Latin-1 vs UTF-16 (LU); mirror of the UL variant
// above, with three vector temps (v0-v2) for the mixed-encoding path.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15898 
// String.indexOf intrinsic, both strings UTF-16 (UU), variable needle
// length.  icnt2 == -1 tells the stub the needle length is in cnt2.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15919 
// String.indexOf intrinsic, both strings Latin-1 (LL), variable needle
// length (icnt2 == -1 means "length is in cnt2").
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15940 
// String.indexOf intrinsic, UTF-16 haystack / Latin-1 needle (UL),
// variable needle length (icnt2 == -1 means "length is in cnt2").
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15961 
// String.indexOf with a small compile-time-constant needle length
// (<= 4, per immI_le_4), UU encoding; the constant length is passed
// directly and cnt2 registers are not needed (zr placeholders).
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15982 
// Constant-needle-length indexOf (length <= 4), LL encoding.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16003 
// Constant-needle-length indexOf, UL encoding; note the UL constant
// case is restricted to a single-element needle (immI_1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16024 
// indexOf of a single char in a UTF-16 string.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16042 
// String.equals intrinsic, Latin-1 (LL): element size 1 byte.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16058 
// String.equals intrinsic, UTF-16 (UU): element size 2 bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16074 
// Arrays.equals intrinsic for byte[] (element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16091 
// Arrays.equals intrinsic for char[] (element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16108 
// StringCoding.hasNegatives intrinsic: detect any byte with the sign
// bit set in a byte[] range (used for Latin-1/ASCII checks).
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16119 
16120 // fast char[] to byte[] compression
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // KILL list corrected to match the effect clause: the inputs
  // src/dst/len are consumed and the vector temps V0-V3 are clobbered
  // (the old comment named R4, which is not an operand of this rule).
  format %{ "String Compress $src,$dst -> $result    // KILL $src,$dst,$len, V0-V3, cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16138 
16139 // fast byte[] to char[] inflation
16140 instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
16141                         vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
16142 %{
16143   match(Set dummy (StrInflatedCopy src (Binary dst len)));
16144   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
16145 
16146   format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
16147   ins_encode %{
16148     __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
16149                           $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
16150   %}
16151   ins_pipe(pipe_class_memory);
16152 %}
16153 
16154 // encode char[] to byte[] in ISO_8859_1
// ISO-8859-1 encode intrinsic: encode char[] to byte[], returning the
// number of characters encoded in $result.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16172 
16173 // ============================================================================
16174 // This name is KNOWN by the ADLC and cannot be changed.
16175 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16176 // for this guy.
// ThreadLocal: the current-thread pointer already lives in the
// dedicated thread register, so this emits no code (size 0, cost 0).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16191 
16192 // ====================VECTOR INSTRUCTIONS=====================================
16193 
16194 // Load vector (32 bits)
// Load a 32-bit vector into the low lanes of a D register.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16204 
16205 // Load vector (64 bits)
// Load a 64-bit vector into a D register.
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16215 
16216 // Load Vector (128 bits)
// Load a 128-bit vector into a Q register.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16226 
16227 // Store Vector (32 bits)
// Store the low 32 bits of a vector register.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16237 
16238 // Store Vector (64 bits)
// Store a 64-bit vector from a D register.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16248 
16249 // Store Vector (128 bits)
// Store a 128-bit vector from a Q register.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16259 
// Replicate a byte from a GP register into all lanes of a 64-bit
// vector; also matched for 4-lane vectors (extra lanes are harmless).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16272 
// Replicate a byte from a GP register into all 16 byte lanes of a
// 128-bit vector (dup).
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16284 
// Replicate an immediate byte into every byte lane of a 64-bit vector.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Mask to the low byte; the macro assembler picks a movi/mvni encoding.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16297 
// Replicate an immediate byte into all 16 byte lanes of a 128-bit vector.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    // Mask to the low byte before broadcasting.
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16309 
// Replicate a 16-bit value from a GP register into every halfword lane
// of a 64-bit vector (dup). Also used for 2-lane short vectors.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16322 
// Replicate a 16-bit value from a GP register into all 8 halfword lanes
// of a 128-bit vector (dup).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16334 
// Replicate an immediate 16-bit value into every halfword lane of a
// 64-bit vector.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Mask to the low halfword before broadcasting.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16347 
// Replicate an immediate 16-bit value into all 8 halfword lanes of a
// 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    // Mask to the low halfword before broadcasting.
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16359 
// Replicate a 32-bit value from a GP register into both word lanes of a
// 64-bit vector (dup).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16371 
// Replicate a 32-bit value from a GP register into all 4 word lanes of a
// 128-bit vector (dup).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16383 
// Replicate an immediate 32-bit value into both word lanes of a 64-bit
// vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16395 
// Replicate an immediate 32-bit value into all 4 word lanes of a 128-bit
// vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16407 
// Replicate a 64-bit value from a GP register into both doubleword lanes
// of a 128-bit vector (dup).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16419 
// Zero a 128-bit vector register by xoring it with itself; used to
// materialize an all-zero 2L (or 4I) vector without loading a constant.
// NOTE(review): matches ReplicateI (not ReplicateL) -- presumably the
// ideal graph represents the zero long vector this way; confirm before
// changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // eor dst,dst,dst clears all 128 bits regardless of lane layout.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16433 
// Replicate a float from an FP register into both single lanes of a
// 64-bit vector (dup from element 0).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
16446 
// Replicate a float from an FP register into all 4 single lanes of a
// 128-bit vector (dup from element 0).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
16459 
// Replicate a double from an FP register into both double lanes of a
// 128-bit vector (dup from element 0).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16472 
16473 // ====================REDUCTION ARITHMETIC====================================
16474 
// Add-reduce a 2-element int vector into a scalar:
// dst = src1 + src2[0] + src2[1].
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    // Extract both lanes to GP registers, then sum with the scalar input.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16493 
// Add-reduce a 4-element int vector into a scalar:
// dst = src1 + src2[0] + src2[1] + src2[2] + src2[3].
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    // addv sums all four lanes into lane 0 of tmp; extract and add src1.
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16511 
// Multiply-reduce a 2-element int vector into a scalar:
// dst = src1 * src2[0] * src2[1].
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  // dst is also a temp: it is written before the last lane is folded in.
  effect(TEMP tmp, TEMP dst);
  // Fixed: dropped the stray trailing "\n\t" after the final format line,
  // which printed a dangling newline/tab in debug disassembly listings.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    // Extract lane 0, multiply by the scalar input, then fold in lane 1.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16530 
// Multiply-reduce a 4-element int vector into a scalar:
// dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Fixed: dropped the stray trailing "\n\t" after the final format line,
  // which printed a dangling newline/tab in debug disassembly listings.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    // Move the high 64 bits of src2 into tmp's low half, then multiply
    // pairwise: tmp[0] = src2[0]*src2[2], tmp[1] = src2[1]*src2[3].
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    // Fold the two partial products together with the scalar input.
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16555 
// Add-reduce a 2-element float vector into a scalar:
// dst = src1 + src2[0] + src2[1] (strictly ordered, lane by lane).
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    // dst = src1 + lane 0 (scalar fadds reads element 0 of src2).
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Move lane 1 into tmp's element 0, then add it in.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16575 
// Add-reduce a 4-element float vector into a scalar:
// dst = src1 + src2[0] + src2[1] + src2[2] + src2[3], strictly in lane
// order (scalar fadds per lane preserves FP semantics; no addv here).
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    // dst = src1 + lane 0, then shuttle each remaining lane through
    // tmp's element 0 and accumulate.
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16607 
// Multiply-reduce a 2-element float vector into a scalar:
// dst = src1 * src2[0] * src2[1] (strictly ordered, lane by lane).
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: trailing format comment previously read "add reduction4f";
  // this is a 2-lane MUL reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    // dst = src1 * lane 0 (scalar fmuls reads element 0 of src2).
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Move lane 1 into tmp's element 0, then multiply it in.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16627 
// Multiply-reduce a 4-element float vector into a scalar:
// dst = src1 * src2[0] * src2[1] * src2[2] * src2[3], strictly in lane
// order to preserve FP semantics.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: trailing format comment previously read "add reduction4f";
  // this is a MUL reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    // dst = src1 * lane 0, then shuttle each remaining lane through
    // tmp's element 0 and multiply it in.
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16659 
// Add-reduce a 2-element double vector into a scalar:
// dst = src1 + src2[0] + src2[1] (strictly ordered).
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    // dst = src1 + lane 0; then move lane 1 down and add it in.
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16679 
// Multiply-reduce a 2-element double vector into a scalar:
// dst = src1 * src2[0] * src2[1] (strictly ordered).
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: trailing format comment previously read "add reduction2d";
  // this is a MUL reduction.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    // dst = src1 * lane 0; then move lane 1 down and multiply it in.
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16699 
16700 // ====================VECTOR ARITHMETIC=======================================
16701 
16702 // --------------------------------- ADD --------------------------------------
16703 
// Vector integer add, 8 (or 4) byte lanes in a 64-bit register.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16718 
// Vector integer add, 16 byte lanes in a 128-bit register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16732 
// Vector integer add, 4 (or 2) halfword lanes in a 64-bit register.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16747 
// Vector integer add, 8 halfword lanes in a 128-bit register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16761 
// Vector integer add, 2 word lanes in a 64-bit register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16775 
// Vector integer add, 4 word lanes in a 128-bit register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16789 
// Vector long add, 2 doubleword lanes in a 128-bit register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16803 
// Vector float add, 2 single lanes in a 64-bit register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16817 
// Vector float add, 4 single lanes in a 128-bit register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16831 
// Vector double add, 2 double lanes in a 128-bit register.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Fixed: restore the missing length guard. Every sibling rule in this
  // file (vsub2D, vmul2D, vadd2L, ...) restricts matching to 2-lane
  // vectors; without it this rule could match an AddVD node of a
  // different length.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16844 
16845 // --------------------------------- SUB --------------------------------------
16846 
// Vector integer subtract, 8 (or 4) byte lanes in a 64-bit register.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16861 
// Vector integer subtract, 16 byte lanes in a 128-bit register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16875 
// Vector integer subtract, 4 (or 2) halfword lanes in a 64-bit register.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16890 
// Vector integer subtract, 8 halfword lanes in a 128-bit register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16904 
// Vector integer subtract, 2 word lanes in a 64-bit register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16918 
// Vector integer subtract, 4 word lanes in a 128-bit register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16932 
// Vector long subtract, 2 doubleword lanes in a 128-bit register.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16946 
// Vector float subtract, 2 single lanes in a 64-bit register.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16960 
// Vector float subtract, 4 single lanes in a 128-bit register.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16974 
// Vector double subtract, 2 double lanes in a 128-bit register.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16988 
16989 // --------------------------------- MUL --------------------------------------
16990 
// Vector integer multiply, 4 (or 2) halfword lanes in a 64-bit register.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
17005 
// Vector integer multiply, 8 halfword lanes in a 128-bit register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
17019 
// Vector integer multiply, 2 word lanes in a 64-bit register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
17033 
// Vector integer multiply, 4 word lanes in a 128-bit register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
17047 
// Vector float multiply, 2 single lanes in a 64-bit register.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
17061 
// Vector float multiply, 4 single lanes in a 128-bit register.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17075 
// Vector double multiply, 2 double lanes in a 128-bit register.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17089 
17090 // --------------------------------- MLA --------------------------------------
17091 
// Fused integer multiply-accumulate: dst += src1 * src2, 4 (or 2)
// halfword lanes in a 64-bit register (mla).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  // Matches the add-of-multiply pattern so C2 fuses it into one mla.
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
17106 
// Fused integer multiply-accumulate: dst += src1 * src2, 8 halfword
// lanes in a 128-bit register (mla).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17120 
// Fused integer multiply-accumulate: dst += src1 * src2, 2 word lanes
// in a 64-bit register (mla).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
17134 
// Fused integer multiply-accumulate: dst += src1 * src2, 4 word lanes
// in a 128-bit register (mla).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17148 
// dst + src1 * src2
// Fused FP multiply-add (fmla), 2 single lanes; matched only when the
// FmaVF node is present, i.e. UseFMA is enabled.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
17162 
17163 // dst + src1 * src2
17164 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
17165   predicate(UseFMA && n->as_Vector()->length() == 4);
17166   match(Set dst (FmaVF  dst (Binary src1 src2)));
17167   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
17168   ins_cost(INSN_COST);
17169   ins_encode %{
17170     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
17171             as_FloatRegister($src1$$reg),
17172             as_FloatRegister($src2$$reg));
17173   %}
17174   ins_pipe(vmuldiv_fp128);
17175 %}
17176 
17177 // dst + src1 * src2
17178 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
17179   predicate(UseFMA && n->as_Vector()->length() == 2);
17180   match(Set dst (FmaVD  dst (Binary src1 src2)));
17181   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
17182   ins_cost(INSN_COST);
17183   ins_encode %{
17184     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
17185             as_FloatRegister($src1$$reg),
17186             as_FloatRegister($src2$$reg));
17187   %}
17188   ins_pipe(vmuldiv_fp128);
17189 %}
17190 
17191 // --------------------------------- MLS --------------------------------------
17192 
// Integer vector multiply-subtract: dst = dst - src1 * src2 (MLS).

// Short lanes, 64-bit (D) register; predicate also accepts length()==2
// (same pattern as vmla4S above).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Eight short lanes, 128-bit (X) register.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Two int lanes, 64-bit (D) register.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Four int lanes, 128-bit (X) register.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17249 
// Floating-point fused multiply-subtract (FMLS): dst = dst - src1 * src2.
// Two match rules per instruct: the negation may appear on either factor
// ((-src1)*src2 or src1*(-src2)) — both fold to the same FMLS.

// dst - src1 * src2
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17294 
17295 // --------------------------------- DIV --------------------------------------
17296 
// Vector floating-point divide (FDIV): dst = src1 / src2, element-wise.

// Two float lanes, 64-bit (D) register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Four float lanes, 128-bit (X) register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Two double lanes, 128-bit (X) register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17338 
17339 // --------------------------------- SQRT -------------------------------------
17340 
// Vector square root, two double lanes (FSQRT 2D). Only the 2D form is
// defined here; no ins_cost is given, so the ADL default cost applies.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17352 
17353 // --------------------------------- ABS --------------------------------------
17354 
// Vector floating-point absolute value (FABS), element-wise.

// Two float lanes, 64-bit (D) register.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Four float lanes, 128-bit (X) register.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Two double lanes, 128-bit (X) register.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17393 
17394 // --------------------------------- NEG --------------------------------------
17395 
// Vector floating-point negation (FNEG), element-wise.

// Two float lanes, 64-bit (D) register.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Four float lanes, 128-bit (X) register.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Two double lanes, 128-bit (X) register.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17434 
17435 // --------------------------------- AND --------------------------------------
17436 
// Bitwise AND of vectors. Logical ops are lane-size agnostic, so the
// predicates test length_in_bytes() rather than element count.

// 4- or 8-byte payload, 64-bit (D) register; emits AND on T8B.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16-byte payload, 128-bit (X) register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17465 
17466 // --------------------------------- OR ---------------------------------------
17467 
// Bitwise OR of vectors, 4- or 8-byte payload, 64-bit (D) register.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed format string: this rule emits ORR, but the format previously
  // said "and" (copy-paste from vand8B), mislabelling PrintOptoAssembly
  // output; now consistent with vor16B.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17482 
// Bitwise OR of vectors, 16-byte payload, 128-bit (X) register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17496 
17497 // --------------------------------- XOR --------------------------------------
17498 
// Bitwise XOR of vectors. The AArch64 mnemonic is EOR; the format string
// shows the ideal-graph name "xor".

// 4- or 8-byte payload, 64-bit (D) register.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16-byte payload, 128-bit (X) register.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17527 
17528 // ------------------------------ Shift ---------------------------------------
17529 
// Materialize a vector shift count: broadcast the GP-register count into
// every byte lane of a 128-bit vector register.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Hence the right-shift count is broadcast and then negated, so the
// SSHL/USHL rules below can serve both shift directions.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17549 
// Variable-count byte shifts. SSHL handles both LShiftVB and RShiftVB
// because vshiftcntR negated the count (negative count => right shift);
// USHL likewise implements the unsigned right shift.

// 4 or 8 byte lanes, 64-bit (D) register.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16 byte lanes, 128-bit (X) register.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift, 4 or 8 byte lanes.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Unsigned (logical) right shift, 16 byte lanes.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17605 
// Immediate-count byte shifts. Java shift semantics: a count >= the lane
// width (8 bits) must still produce a defined result, so out-of-range
// counts are handled explicitly — logical shifts produce zero (via
// eor dst,src,src), arithmetic right shifts clamp the count to 7 (which
// yields all sign bits, the same result as any larger count).

// Left shift by immediate, 4 or 8 byte lanes.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // count out of range: result is all zeroes (x ^ x == 0)
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift by immediate, 16 byte lanes.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // count out of range: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift by immediate, 4 or 8 byte lanes.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // clamp: sshr by 7 already replicates the sign bit into every bit
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift by immediate, 16 byte lanes.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift by immediate, 4 or 8 byte lanes.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // count out of range: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift by immediate, 16 byte lanes.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // count out of range: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17712 
// Variable-count short (16-bit lane) shifts; same SSHL/USHL scheme as the
// byte forms above (right-shift counts arrive pre-negated).

// 2 or 4 short lanes, 64-bit (D) register.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 8 short lanes, 128-bit (X) register.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift, 2 or 4 short lanes.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Unsigned (logical) right shift, 8 short lanes.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17768 
// Immediate-count short shifts. Out-of-range counts (>= 16, the lane
// width) are handled as in the byte forms: logical shifts zero the
// result, arithmetic right shifts clamp the count to 15.

// Left shift by immediate, 2 or 4 short lanes.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // count out of range: result is all zeroes (x ^ x == 0)
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift by immediate, 8 short lanes.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // count out of range: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift by immediate, 2 or 4 short lanes.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // clamp: sshr by 15 already replicates the sign bit into every bit
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift by immediate, 8 short lanes.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift by immediate, 2 or 4 short lanes.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // count out of range: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift by immediate, 8 short lanes.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // count out of range: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17875 
// Variable-count int (32-bit lane) shifts; SSHL serves both directions
// (right-shift counts arrive pre-negated), USHL for the logical right.

// Two int lanes, 64-bit (D) register.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Four int lanes, 128-bit (X) register.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift, two int lanes.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Unsigned (logical) right shift, four int lanes.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17929 
// Immediate-count int shifts. No out-of-range handling here, unlike the
// byte/short forms — presumably because Java already masks int shift
// counts to 0..31, which the 32-bit lane encodes directly; TODO confirm.

// Left shift by immediate, two int lanes.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift by immediate, four int lanes.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift by immediate, two int lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift by immediate, four int lanes.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift by immediate, two int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift by immediate, four int lanes.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18007 
18008 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
18009   predicate(n->as_Vector()->length() == 2);
18010   match(Set dst (LShiftVL src shift));
18011   match(Set dst (RShiftVL src shift));
18012   ins_cost(INSN_COST);
18013   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
18014   ins_encode %{
18015     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
18016             as_FloatRegister($src$$reg),
18017             as_FloatRegister($shift$$reg));
18018   %}
18019   ins_pipe(vshift128);
18020 %}
18021 
// Variable-count logical right shift of a 2-lane long vector.
// Implemented with USHL, which shifts right for negative per-lane
// counts.  NOTE(review): as with vsll2L, this assumes the shift
// operand holds negated counts for the right shift — confirm against
// the vector shift-count generation rules elsewhere in this file.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);  // only the 2-element long vector shape
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18034 
// Left shift of a 2-lane long vector by an immediate count.  Emits a
// single SHL on the 2D arrangement.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only the 2-element long vector shape
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18047 
// Arithmetic (signed) right shift of a 2-lane long vector by an
// immediate count.  Emits a single SSHR on the 2D arrangement.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only the 2-element long vector shape
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18060 
// Logical (unsigned) right shift of a 2-lane long vector by an
// immediate count.  Emits a single USHR on the 2D arrangement.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only the 2-element long vector shape
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18073 
18074 //----------PEEPHOLE RULES-----------------------------------------------------
18075 // These must follow all instruction definitions as they use the names
18076 // defined in the instructions definitions.
18077 //
18078 // peepmatch ( root_instr_name [preceding_instruction]* );
18079 //
18080 // peepconstraint %{
18081 // (instruction_number.operand_name relational_op instruction_number.operand_name
18082 //  [, ...] );
18083 // // instruction numbers are zero-based using left to right order in peepmatch
18084 //
18085 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18086 // // provide an instruction_number.operand_name for each operand that appears
18087 // // in the replacement instruction's match rule
18088 //
18089 // ---------VM FLAGS---------------------------------------------------------
18090 //
18091 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18092 //
18093 // Each peephole rule is given an identifying number starting with zero and
18094 // increasing by one in the order seen by the parser.  An individual peephole
18095 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18096 // on the command-line.
18097 //
18098 // ---------CURRENT LIMITATIONS----------------------------------------------
18099 //
18100 // Only match adjacent instructions in same basic block
18101 // Only equality constraints
18102 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18103 // Only one replacement instruction
18104 //
18105 // ---------EXAMPLE----------------------------------------------------------
18106 //
18107 // // pertinent parts of existing instructions in architecture description
18108 // instruct movI(iRegINoSp dst, iRegI src)
18109 // %{
18110 //   match(Set dst (CopyI src));
18111 // %}
18112 //
18113 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18114 // %{
18115 //   match(Set dst (AddI dst src));
18116 //   effect(KILL cr);
18117 // %}
18118 //
18119 // // Change (inc mov) to lea
18120 // peephole %{
//   // increment preceded by register-register move
18122 //   peepmatch ( incI_iReg movI );
18123 //   // require that the destination register of the increment
18124 //   // match the destination register of the move
18125 //   peepconstraint ( 0.dst == 1.dst );
18126 //   // construct a replacement instruction that sets
18127 //   // the destination to ( move's source register + one )
18128 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18129 // %}
18130 //
18131 
18132 // Implementation no longer uses movX instructions since
18133 // machine-independent system no longer uses CopyX nodes.
18134 //
18135 // peephole
18136 // %{
18137 //   peepmatch (incI_iReg movI);
18138 //   peepconstraint (0.dst == 1.dst);
18139 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18140 // %}
18141 
18142 // peephole
18143 // %{
18144 //   peepmatch (decI_iReg movI);
18145 //   peepconstraint (0.dst == 1.dst);
18146 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18147 // %}
18148 
18149 // peephole
18150 // %{
18151 //   peepmatch (addI_iReg_imm movI);
18152 //   peepconstraint (0.dst == 1.dst);
18153 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18154 // %}
18155 
18156 // peephole
18157 // %{
18158 //   peepmatch (incL_iReg movL);
18159 //   peepconstraint (0.dst == 1.dst);
18160 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18161 // %}
18162 
18163 // peephole
18164 // %{
18165 //   peepmatch (decL_iReg movL);
18166 //   peepconstraint (0.dst == 1.dst);
18167 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18168 // %}
18169 
18170 // peephole
18171 // %{
18172 //   peepmatch (addL_iReg_imm movL);
18173 //   peepconstraint (0.dst == 1.dst);
18174 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18175 // %}
18176 
18177 // peephole
18178 // %{
18179 //   peepmatch (addP_iReg_imm movP);
18180 //   peepconstraint (0.dst == 1.dst);
18181 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18182 // %}
18183 
18184 // // Change load of spilled value to only a spill
18185 // instruct storeI(memory mem, iRegI src)
18186 // %{
18187 //   match(Set mem (StoreI mem src));
18188 // %}
18189 //
18190 // instruct loadI(iRegINoSp dst, memory mem)
18191 // %{
18192 //   match(Set dst (LoadI mem));
18193 // %}
18194 //
18195 
18196 //----------SMARTSPILL RULES---------------------------------------------------
18197 // These must follow all instruction definitions as they use the names
18198 // defined in the instructions definitions.
18199 
18200 // Local Variables:
18201 // mode: c++
18202 // End: