1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point registers, each 128 bits wide. Each can
// store a vector of single or double precision floating-point values:
// up to 4 * 32 bit floats or 2 * 64 bit floats.  We currently only
// use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save on call (whereas
// the platform ABI treats v8-v15 as callee save). Float registers
// v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CSPR status flag register is not directly accessible as an
// instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// n.b. a 64 bit vector occupies the same Vn/Vn_H slot pair as a
// double (cf. double_reg above)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (four 32-bit slots -- Vn, Vn_H, Vn_J, Vn_K -- per register)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Singleton classes for the individual registers v0..v3, for use
// when an instruction requires its operand in a specific register.
// NOTE(review): each class lists only the Vn/Vn_H slot pair even
// though the headline comments say `128 bit' (cf. vectorx_reg which
// also lists Vn_J/Vn_K) -- TODO confirm the _J/_K halves are
// deliberately omitted here.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for the condition codes (flags) register
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references carry the highest cost.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 emits no call trampoline stubs, so this reports zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  // (zero, for the same reason as above)
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
class HandlerImpl {

 public:

  // Emitters for the exception and deoptimization handler stubs;
  // definitions are not in this block.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size of the exception handler: a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): reserves 4 instruction slots; presumably the far
    // branch may need up to 3 instructions -- confirm against
    // MacroAssembler::far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
  // graph traversal helpers used by the volatile put/get and CAS
  // predicates below (definitions appear in the source block)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // helpers which walk between the leading, card mark and trailing
  // membars of a volatile put or CAS subgraph
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1073 %}
1074 
1075 source %{
1076 
1077   // Optimizaton of volatile gets and puts
1078   // -------------------------------------
1079   //
1080   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1081   // use to implement volatile reads and writes. For a volatile read
1082   // we simply need
1083   //
1084   //   ldar<x>
1085   //
1086   // and for a volatile write we need
1087   //
1088   //   stlr<x>
1089   //
1090   // Alternatively, we can implement them by pairing a normal
1091   // load/store with a memory barrier. For a volatile read we need
1092   //
1093   //   ldr<x>
1094   //   dmb ishld
1095   //
1096   // for a volatile write
1097   //
1098   //   dmb ish
1099   //   str<x>
1100   //   dmb ish
1101   //
1102   // We can also use ldaxr and stlxr to implement compare and swap CAS
1103   // sequences. These are normally translated to an instruction
1104   // sequence like the following
1105   //
1106   //   dmb      ish
1107   // retry:
1108   //   ldxr<x>   rval raddr
1109   //   cmp       rval rold
1110   //   b.ne done
1111   //   stlxr<x>  rval, rnew, rold
1112   //   cbnz      rval retry
1113   // done:
1114   //   cset      r0, eq
1115   //   dmb ishld
1116   //
1117   // Note that the exclusive store is already using an stlxr
1118   // instruction. That is required to ensure visibility to other
1119   // threads of the exclusive write (assuming it succeeds) before that
1120   // of any subsequent writes.
1121   //
1122   // The following instruction sequence is an improvement on the above
1123   //
1124   // retry:
1125   //   ldaxr<x>  rval raddr
1126   //   cmp       rval rold
1127   //   b.ne done
1128   //   stlxr<x>  rval, rnew, rold
1129   //   cbnz      rval retry
1130   // done:
1131   //   cset      r0, eq
1132   //
1133   // We don't need the leading dmb ish since the stlxr guarantees
1134   // visibility of prior writes in the case that the swap is
1135   // successful. Crucially we don't have to worry about the case where
1136   // the swap is not successful since no valid program should be
1137   // relying on visibility of prior changes by the attempting thread
1138   // in the case where the CAS fails.
1139   //
1140   // Similarly, we don't need the trailing dmb ishld if we substitute
1141   // an ldaxr instruction since that will provide all the guarantees we
1142   // require regarding observation of changes made by other threads
1143   // before any change to the CAS address observed by the load.
1144   //
1145   // In order to generate the desired instruction sequence we need to
1146   // be able to identify specific 'signature' ideal graph node
1147   // sequences which i) occur as a translation of a volatile reads or
1148   // writes or CAS operations and ii) do not occur through any other
1149   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1151   // sequences to the desired machine code sequences. Selection of the
1152   // alternative rules can be implemented by predicates which identify
1153   // the relevant node sequences.
1154   //
1155   // The ideal graph generator translates a volatile read to the node
1156   // sequence
1157   //
1158   //   LoadX[mo_acquire]
1159   //   MemBarAcquire
1160   //
1161   // As a special case when using the compressed oops optimization we
1162   // may also see this variant
1163   //
1164   //   LoadN[mo_acquire]
1165   //   DecodeN
1166   //   MemBarAcquire
1167   //
1168   // A volatile write is translated to the node sequence
1169   //
1170   //   MemBarRelease
1171   //   StoreX[mo_release] {CardMark}-optional
1172   //   MemBarVolatile
1173   //
1174   // n.b. the above node patterns are generated with a strict
1175   // 'signature' configuration of input and output dependencies (see
1176   // the predicates below for exact details). The card mark may be as
1177   // simple as a few extra nodes or, in a few GC configurations, may
1178   // include more complex control flow between the leading and
1179   // trailing memory barriers. However, whatever the card mark
1180   // configuration these signatures are unique to translated volatile
1181   // reads/stores -- they will not appear as a result of any other
1182   // bytecode translation or inlining nor as a consequence of
1183   // optimizing transforms.
1184   //
1185   // We also want to catch inlined unsafe volatile gets and puts and
1186   // be able to implement them using either ldar<x>/stlr<x> or some
1187   // combination of ldr<x>/stlr<x> and dmb instructions.
1188   //
1189   // Inlined unsafe volatiles puts manifest as a minor variant of the
1190   // normal volatile put node sequence containing an extra cpuorder
1191   // membar
1192   //
1193   //   MemBarRelease
1194   //   MemBarCPUOrder
1195   //   StoreX[mo_release] {CardMark}-optional
1196   //   MemBarCPUOrder
1197   //   MemBarVolatile
1198   //
1199   // n.b. as an aside, a cpuorder membar is not itself subject to
1200   // matching and translation by adlc rules.  However, the rule
1201   // predicates need to detect its presence in order to correctly
1202   // select the desired adlc rules.
1203   //
1204   // Inlined unsafe volatile gets manifest as a slightly different
1205   // node sequence to a normal volatile get because of the
1206   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1209   // present
1210   //
1211   //   MemBarCPUOrder
1212   //        ||       \\
1213   //   MemBarCPUOrder LoadX[mo_acquire]
1214   //        ||            |
1215   //        ||       {DecodeN} optional
1216   //        ||       /
1217   //     MemBarAcquire
1218   //
1219   // In this case the acquire membar does not directly depend on the
1220   // load. However, we can be sure that the load is generated from an
1221   // inlined unsafe volatile get if we see it dependent on this unique
1222   // sequence of membar nodes. Similarly, given an acquire membar we
1223   // can know that it was added because of an inlined unsafe volatile
1224   // get if it is fed and feeds a cpuorder membar and if its feed
1225   // membar also feeds an acquiring load.
1226   //
1227   // Finally an inlined (Unsafe) CAS operation is translated to the
1228   // following ideal graph
1229   //
1230   //   MemBarRelease
1231   //   MemBarCPUOrder
1232   //   CompareAndSwapX {CardMark}-optional
1233   //   MemBarCPUOrder
1234   //   MemBarAcquire
1235   //
1236   // So, where we can identify these volatile read and write
1237   // signatures we can choose to plant either of the above two code
1238   // sequences. For a volatile read we can simply plant a normal
1239   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1240   // also choose to inhibit translation of the MemBarAcquire and
1241   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1242   //
1243   // When we recognise a volatile store signature we can choose to
1244   // plant at a dmb ish as a translation for the MemBarRelease, a
1245   // normal str<x> and then a dmb ish for the MemBarVolatile.
1246   // Alternatively, we can inhibit translation of the MemBarRelease
1247   // and MemBarVolatile and instead plant a simple stlr<x>
1248   // instruction.
1249   //
1250   // when we recognise a CAS signature we can choose to plant a dmb
1251   // ish as a translation for the MemBarRelease, the conventional
1252   // macro-instruction sequence for the CompareAndSwap node (which
1253   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1254   // Alternatively, we can elide generation of the dmb instructions
1255   // and plant the alternative CompareAndSwap macro-instruction
1256   // sequence (which uses ldaxr<x>).
1257   //
1258   // Of course, the above only applies when we see these signature
1259   // configurations. We still want to plant dmb instructions in any
1260   // other cases where we may see a MemBarAcquire, MemBarRelease or
1261   // MemBarVolatile. For example, at the end of a constructor which
1262   // writes final/volatile fields we will see a MemBarRelease
1263   // instruction and this needs a 'dmb ish' lest we risk the
1264   // constructed object being visible without making the
1265   // final/volatile field writes visible.
1266   //
1267   // n.b. the translation rules below which rely on detection of the
1268   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1269   // If we see anything other than the signature configurations we
1270   // always just translate the loads and stores to ldr<x> and str<x>
1271   // and translate acquire, release and volatile membars to the
1272   // relevant dmb instructions.
1273   //
1274 
1275   // graph traversal helpers used for volatile put/get and CAS
1276   // optimization
1277 
1278   // 1) general purpose helpers
1279 
1280   // if node n is linked to a parent MemBarNode by an intervening
1281   // Control and Memory ProjNode return the MemBarNode otherwise return
1282   // NULL.
1283   //
1284   // n may only be a Load or a MemBar.
1285 
1286   MemBarNode *parent_membar(const Node *n)
1287   {
1288     Node *ctl = NULL;
1289     Node *mem = NULL;
1290     Node *membar = NULL;
1291 
1292     if (n->is_Load()) {
1293       ctl = n->lookup(LoadNode::Control);
1294       mem = n->lookup(LoadNode::Memory);
1295     } else if (n->is_MemBar()) {
1296       ctl = n->lookup(TypeFunc::Control);
1297       mem = n->lookup(TypeFunc::Memory);
1298     } else {
1299         return NULL;
1300     }
1301 
1302     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1303       return NULL;
1304     }
1305 
1306     membar = ctl->lookup(0);
1307 
1308     if (!membar || !membar->is_MemBar()) {
1309       return NULL;
1310     }
1311 
1312     if (mem->lookup(0) != membar) {
1313       return NULL;
1314     }
1315 
1316     return membar->as_MemBar();
1317   }
1318 
1319   // if n is linked to a child MemBarNode by intervening Control and
1320   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1321 
1322   MemBarNode *child_membar(const MemBarNode *n)
1323   {
1324     ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control);
1325     ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory);
1326 
1327     // MemBar needs to have both a Ctl and Mem projection
1328     if (! ctl || ! mem)
1329       return NULL;
1330 
1331     MemBarNode *child = NULL;
1332     Node *x;
1333 
1334     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1335       x = ctl->fast_out(i);
1336       // if we see a membar we keep hold of it. we may also see a new
1337       // arena copy of the original but it will appear later
1338       if (x->is_MemBar()) {
1339           child = x->as_MemBar();
1340           break;
1341       }
1342     }
1343 
1344     if (child == NULL) {
1345       return NULL;
1346     }
1347 
1348     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1349       x = mem->fast_out(i);
1350       // if we see a membar we keep hold of it. we may also see a new
1351       // arena copy of the original but it will appear later
1352       if (x == child) {
1353         return child;
1354       }
1355     }
1356     return NULL;
1357   }
1358 
1359   // helper predicate use to filter candidates for a leading memory
1360   // barrier
1361   //
1362   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1363   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1364 
1365   bool leading_membar(const MemBarNode *barrier)
1366   {
1367     int opcode = barrier->Opcode();
1368     // if this is a release membar we are ok
1369     if (opcode == Op_MemBarRelease) {
1370       return true;
1371     }
1372     // if its a cpuorder membar . . .
1373     if (opcode != Op_MemBarCPUOrder) {
1374       return false;
1375     }
1376     // then the parent has to be a release membar
1377     MemBarNode *parent = parent_membar(barrier);
1378     if (!parent) {
1379       return false;
1380     }
1381     opcode = parent->Opcode();
1382     return opcode == Op_MemBarRelease;
1383   }
1384 
1385   // 2) card mark detection helper
1386 
1387   // helper predicate which can be used to detect a volatile membar
1388   // introduced as part of a conditional card mark sequence either by
1389   // G1 or by CMS when UseCondCardMark is true.
1390   //
1391   // membar can be definitively determined to be part of a card mark
1392   // sequence if and only if all the following hold
1393   //
1394   // i) it is a MemBarVolatile
1395   //
1396   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1397   // true
1398   //
1399   // iii) the node's Mem projection feeds a StoreCM node.
1400 
1401   bool is_card_mark_membar(const MemBarNode *barrier)
1402   {
1403     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1404       return false;
1405     }
1406 
1407     if (barrier->Opcode() != Op_MemBarVolatile) {
1408       return false;
1409     }
1410 
1411     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1412 
1413     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1414       Node *y = mem->fast_out(i);
1415       if (y->Opcode() == Op_StoreCM) {
1416         return true;
1417       }
1418     }
1419 
1420     return false;
1421   }
1422 
1423 
1424   // 3) helper predicates to traverse volatile put or CAS graphs which
1425   // may contain GC barrier subgraphs
1426 
1427   // Preamble
1428   // --------
1429   //
1430   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1432   // leading MemBarRelease and a trailing MemBarVolatile as follows
1433   //
1434   //   MemBarRelease
1435   //  {      ||      } -- optional
1436   //  {MemBarCPUOrder}
1437   //         ||     \\
1438   //         ||     StoreX[mo_release]
1439   //         | \     /
1440   //         | MergeMem
1441   //         | /
1442   //  {MemBarCPUOrder} -- optional
1443   //  {      ||      }
1444   //   MemBarVolatile
1445   //
1446   // where
1447   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1448   //  | \ and / indicate further routing of the Ctl and Mem feeds
1449   //
1450   // this is the graph we see for non-object stores. however, for a
1451   // volatile Object store (StoreN/P) we may see other nodes below the
1452   // leading membar because of the need for a GC pre- or post-write
1453   // barrier.
1454   //
  // with most GC configurations we will see this simple variant which
1456   // includes a post-write barrier card mark.
1457   //
1458   //   MemBarRelease______________________________
1459   //         ||    \\               Ctl \        \\
1460   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1461   //         | \     /                       . . .  /
1462   //         | MergeMem
1463   //         | /
1464   //         ||      /
1465   //  {MemBarCPUOrder} -- optional
1466   //  {      ||      }
1467   //   MemBarVolatile
1468   //
1469   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1470   // the object address to an int used to compute the card offset) and
1471   // Ctl+Mem to a StoreB node (which does the actual card mark).
1472   //
1473   // n.b. a StoreCM node will only appear in this configuration when
1474   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1475   // because it implies a requirement to order visibility of the card
1476   // mark (StoreCM) relative to the object put (StoreP/N) using a
1477   // StoreStore memory barrier (arguably this ought to be represented
1478   // explicitly in the ideal graph but that is not how it works). This
1479   // ordering is required for both non-volatile and volatile
1480   // puts. Normally that means we need to translate a StoreCM using
1481   // the sequence
1482   //
1483   //   dmb ishst
1484   //   stlrb
1485   //
1486   // However, in the case of a volatile put if we can recognise this
1487   // configuration and plant an stlr for the object write then we can
1488   // omit the dmb and just plant an strb since visibility of the stlr
1489   // is ordered before visibility of subsequent stores. StoreCM nodes
1490   // also arise when using G1 or using CMS with conditional card
1491   // marking. In these cases (as we shall see) we don't need to insert
1492   // the dmb when translating StoreCM because there is already an
1493   // intervening StoreLoad barrier between it and the StoreP/N.
1494   //
1495   // It is also possible to perform the card mark conditionally on it
1496   // currently being unmarked in which case the volatile put graph
1497   // will look slightly different
1498   //
1499   //   MemBarRelease____________________________________________
1500   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1501   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1502   //         | \     /                              \            |
1503   //         | MergeMem                            . . .      StoreB
1504   //         | /                                                /
1505   //         ||     /
1506   //   MemBarVolatile
1507   //
1508   // It is worth noting at this stage that both the above
1509   // configurations can be uniquely identified by checking that the
1510   // memory flow includes the following subgraph:
1511   //
1512   //   MemBarRelease
1513   //  {MemBarCPUOrder}
1514   //          |  \      . . .
1515   //          |  StoreX[mo_release]  . . .
1516   //          |   /
1517   //         MergeMem
1518   //          |
1519   //  {MemBarCPUOrder}
1520   //   MemBarVolatile
1521   //
1522   // This is referred to as a *normal* subgraph. It can easily be
1523   // detected starting from any candidate MemBarRelease,
1524   // StoreX[mo_release] or MemBarVolatile.
1525   //
1526   // A simple variation on this normal case occurs for an unsafe CAS
1527   // operation. The basic graph for a non-object CAS is
1528   //
1529   //   MemBarRelease
1530   //         ||
1531   //   MemBarCPUOrder
1532   //         ||     \\   . . .
1533   //         ||     CompareAndSwapX
1534   //         ||       |
1535   //         ||     SCMemProj
1536   //         | \     /
1537   //         | MergeMem
1538   //         | /
1539   //   MemBarCPUOrder
1540   //         ||
1541   //   MemBarAcquire
1542   //
1543   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1546   // tail of the graph is a pair comprising a MemBarCPUOrder +
1547   // MemBarAcquire.
1548   //
1549   // So, in the case of a CAS the normal graph has the variant form
1550   //
1551   //   MemBarRelease
1552   //   MemBarCPUOrder
1553   //          |   \      . . .
1554   //          |  CompareAndSwapX  . . .
1555   //          |    |
1556   //          |   SCMemProj
1557   //          |   /  . . .
1558   //         MergeMem
1559   //          |
1560   //   MemBarCPUOrder
1561   //   MemBarAcquire
1562   //
1563   // This graph can also easily be detected starting from any
1564   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1565   //
1566   // the code below uses two helper predicates, leading_to_normal and
1567   // normal_to_leading to identify these normal graphs, one validating
1568   // the layout starting from the top membar and searching down and
1569   // the other validating the layout starting from the lower membar
1570   // and searching up.
1571   //
1572   // There are two special case GC configurations when a normal graph
1573   // may not be generated: when using G1 (which always employs a
1574   // conditional card mark); and when using CMS with conditional card
1575   // marking configured. These GCs are both concurrent rather than
1576   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1577   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1579   // object put and the corresponding conditional card mark. CMS
1580   // employs a post-write GC barrier while G1 employs both a pre- and
1581   // post-write GC barrier. Of course the extra nodes may be absent --
1582   // they are only inserted for object puts/swaps. This significantly
1583   // complicates the task of identifying whether a MemBarRelease,
1584   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1585   // when using these GC configurations (see below). It adds similar
1586   // complexity to the task of identifying whether a MemBarRelease,
1587   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1588   //
1589   // In both cases the post-write subtree includes an auxiliary
1590   // MemBarVolatile (StoreLoad barrier) separating the object put/swap
1591   // and the read of the corresponding card. This poses two additional
1592   // problems.
1593   //
1594   // Firstly, a card mark MemBarVolatile needs to be distinguished
1595   // from a normal trailing MemBarVolatile. Resolving this first
1596   // problem is straightforward: a card mark MemBarVolatile always
1597   // projects a Mem feed to a StoreCM node and that is a unique marker
1598   //
1599   //      MemBarVolatile (card mark)
1600   //       C |    \     . . .
1601   //         |   StoreCM   . . .
1602   //       . . .
1603   //
1604   // The second problem is how the code generator is to translate the
1605   // card mark barrier? It always needs to be translated to a "dmb
1606   // ish" instruction whether or not it occurs as part of a volatile
1607   // put. A StoreLoad barrier is needed after the object put to ensure
1608   // i) visibility to GC threads of the object put and ii) visibility
1609   // to the mutator thread of any card clearing write by a GC
1610   // thread. Clearly a normal store (str) will not guarantee this
1611   // ordering but neither will a releasing store (stlr). The latter
1612   // guarantees that the object put is visible but does not guarantee
1613   // that writes by other threads have also been observed.
1614   //
1615   // So, returning to the task of translating the object put and the
1616   // leading/trailing membar nodes: what do the non-normal node graph
1617   // look like for these 2 special cases? and how can we determine the
1618   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1619   // in both normal and non-normal cases?
1620   //
1621   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1623   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1624   // intervening StoreLoad barrier (MemBarVolatile).
1625   //
1626   // So, with CMS we may see a node graph for a volatile object store
1627   // which looks like this
1628   //
1629   //   MemBarRelease
1630   //  {MemBarCPUOrder}_(leading)_________________
1631   //     C |    M \       \\                   C \
1632   //       |       \    StoreN/P[mo_release]  CastP2X
1633   //       |    Bot \    /
1634   //       |       MergeMem
1635   //       |         /
1636   //      MemBarVolatile (card mark)
1637   //     C |  ||    M |
1638   //       | LoadB    |
1639   //       |   |      |
1640   //       | Cmp      |\
1641   //       | /        | \
1642   //       If         |  \
1643   //       | \        |   \
1644   // IfFalse  IfTrue  |    \
1645   //       \     / \  |     \
1646   //        \   / StoreCM    |
1647   //         \ /      |      |
1648   //        Region   . . .   |
1649   //          | \           /
1650   //          |  . . .  \  / Bot
1651   //          |       MergeMem
1652   //          |          |
1653   //       {MemBarCPUOrder}
1654   //        MemBarVolatile (trailing)
1655   //
1656   // The first MergeMem merges the AliasIdxBot Mem slice from the
1657   // leading membar and the oopptr Mem slice from the Store into the
1658   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1659   // Mem slice from the card mark membar and the AliasIdxRaw slice
1660   // from the StoreCM into the trailing membar (n.b. the latter
1661   // proceeds via a Phi associated with the If region).
1662   //
1663   // The graph for a CAS varies slightly, the difference being
1664   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1665   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1666   // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional).
1667   //
1668   //   MemBarRelease
1669   //   MemBarCPUOrder_(leading)_______________
1670   //     C |    M \       \\                C \
1671   //       |       \    CompareAndSwapN/P  CastP2X
1672   //       |        \      |
1673   //       |         \   SCMemProj
1674   //       |      Bot \   /
1675   //       |        MergeMem
1676   //       |         /
1677   //      MemBarVolatile (card mark)
1678   //     C |  ||    M |
1679   //       | LoadB    |
1680   //       |   |      |
1681   //       | Cmp      |\
1682   //       | /        | \
1683   //       If         |  \
1684   //       | \        |   \
1685   // IfFalse  IfTrue  |    \
1686   //       \     / \  |     \
1687   //        \   / StoreCM    |
1688   //         \ /      |      |
1689   //        Region   . . .   |
1690   //          | \           /
1691   //          |  . . .  \  / Bot
1692   //          |       MergeMem
1693   //          |          |
1694   //        MemBarCPUOrder
1695   //        MemBarVolatile (trailing)
1696   //
1697   //
1698   // G1 is quite a lot more complicated. The nodes inserted on behalf
1699   // of G1 may comprise: a pre-write graph which adds the old value to
1700   // the SATB queue; the releasing store itself; and, finally, a
1701   // post-write graph which performs a card mark.
1702   //
1703   // The pre-write graph may be omitted, but only when the put is
1704   // writing to a newly allocated (young gen) object and then only if
1705   // there is a direct memory chain to the Initialize node for the
1706   // object allocation. This will not happen for a volatile put since
1707   // any memory chain passes through the leading membar.
1708   //
1709   // The pre-write graph includes a series of 3 If tests. The outermost
1710   // If tests whether SATB is enabled (no else case). The next If tests
1711   // whether the old value is non-NULL (no else case). The third tests
1712   // whether the SATB queue index is > 0, if so updating the queue. The
1713   // else case for this third If calls out to the runtime to allocate a
1714   // new queue buffer.
1715   //
1716   // So with G1 the pre-write and releasing store subgraph looks like
1717   // this (the nested Ifs are omitted).
1718   //
1719   //  MemBarRelease
1720   // {MemBarCPUOrder}_(leading)___________
1721   //     C |  ||  M \   M \    M \  M \ . . .
1722   //       | LoadB   \  LoadL  LoadN   \
1723   //       | /        \                 \
1724   //       If         |\                 \
1725   //       | \        | \                 \
1726   //  IfFalse  IfTrue |  \                 \
1727   //       |     |    |   \                 |
1728   //       |     If   |   /\                |
1729   //       |     |          \               |
1730   //       |                 \              |
1731   //       |    . . .         \             |
1732   //       | /       | /       |            |
1733   //      Region  Phi[M]       |            |
1734   //       | \       |         |            |
1735   //       |  \_____ | ___     |            |
1736   //     C | C \     |   C \ M |            |
1737   //       | CastP2X | StoreN/P[mo_release] |
1738   //       |         |         |            |
1739   //     C |       M |       M |          M |
1740   //        \        |         |           /
1741   //                  . . .
1742   //          (post write subtree elided)
1743   //                    . . .
1744   //             C \         M /
1745   //                \         /
1746   //             {MemBarCPUOrder}
1747   //              MemBarVolatile (trailing)
1748   //
1749   // n.b. the LoadB in this subgraph is not the card read -- it's a
1750   // read of the SATB queue active flag.
1751   //
1752   // The G1 post-write subtree is also optional, this time when the
1753   // new value being written is either null or can be identified as a
1754   // newly allocated (young gen) object with no intervening control
1755   // flow. The latter cannot happen but the former may, in which case
1756   // the card mark membar is omitted and the memory feeds form the
  // leading membar and the StoreN/P are merged direct into the
1758   // trailing membar as per the normal subgraph. So, the only special
1759   // case which arises is when the post-write subgraph is generated.
1760   //
1761   // The kernel of the post-write G1 subgraph is the card mark itself
1762   // which includes a card mark memory barrier (MemBarVolatile), a
1763   // card test (LoadB), and a conditional update (If feeding a
1764   // StoreCM). These nodes are surrounded by a series of nested Ifs
1765   // which try to avoid doing the card mark. The top level If skips if
1766   // the object reference does not cross regions (i.e. it tests if
1767   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1768   // need not be recorded. The next If, which skips on a NULL value,
1769   // may be absent (it is not generated if the type of value is >=
1770   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1771   // checking if card_val != young).  n.b. although this test requires
1772   // a pre-read of the card it can safely be done before the StoreLoad
1773   // barrier. However that does not bypass the need to reread the card
1774   // after the barrier. A final, 4th If tests if the card is already
1775   // marked.
1776   //
1777   //                (pre-write subtree elided)
1778   //        . . .                  . . .    . . .  . . .
1779   //        C |                    M |     M |    M |
1780   //       Region                  Phi[M] StoreN    |
1781   //          |                     / \      |      |
1782   //         / \_______            /   \     |      |
1783   //      C / C \      . . .            \    |      |
1784   //       If   CastP2X . . .            |   |      |
1785   //       / \                           |   |      |
1786   //      /   \                          |   |      |
1787   // IfFalse IfTrue                      |   |      |
1788   //   |       |                         |   |     /|
1789   //   |       If                        |   |    / |
1790   //   |      / \                        |   |   /  |
1791   //   |     /   \                        \  |  /   |
1792   //   | IfFalse IfTrue                   MergeMem  |
1793   //   |  . . .    / \                       /      |
1794   //   |          /   \                     /       |
1795   //   |     IfFalse IfTrue                /        |
1796   //   |      . . .    |                  /         |
1797   //   |               If                /          |
1798   //   |               / \              /           |
1799   //   |              /   \            /            |
1800   //   |         IfFalse IfTrue       /             |
1801   //   |           . . .   |         /              |
1802   //   |                    \       /               |
1803   //   |                     \     /                |
1804   //   |             MemBarVolatile__(card mark)    |
1805   //   |                ||   C |  M \  M \          |
1806   //   |               LoadB   If    |    |         |
1807   //   |                      / \    |    |         |
1808   //   |                     . . .   |    |         |
1809   //   |                          \  |    |        /
1810   //   |                        StoreCM   |       /
1811   //   |                          . . .   |      /
1812   //   |                        _________/      /
1813   //   |                       /  _____________/
1814   //   |   . . .       . . .  |  /            /
1815   //   |    |                 | /   _________/
1816   //   |    |               Phi[M] /        /
1817   //   |    |                 |   /        /
1818   //   |    |                 |  /        /
1819   //   |  Region  . . .     Phi[M]  _____/
1820   //   |    /                 |    /
1821   //   |                      |   /
1822   //   | . . .   . . .        |  /
1823   //   | /                    | /
1824   // Region           |  |  Phi[M]
1825   //   |              |  |  / Bot
1826   //    \            MergeMem
1827   //     \            /
1828   //    {MemBarCPUOrder}
1829   //     MemBarVolatile
1830   //
1831   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1832   // from the leading membar and the oopptr Mem slice from the Store
1833   // into the card mark membar i.e. the memory flow to the card mark
1834   // membar still looks like a normal graph.
1835   //
1836   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1837   // Mem slices (from the StoreCM and other card mark queue stores).
1838   // However in this case the AliasIdxBot Mem slice does not come
1839   // direct from the card mark membar. It is merged through a series
1840   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1841   // from the leading membar with the Mem feed from the card mark
1842   // membar. Each Phi corresponds to one of the Ifs which may skip
1843   // around the card mark membar. So when the If implementing the NULL
1844   // value check has been elided the total number of Phis is 2
1845   // otherwise it is 3.
1846   //
1847   // The CAS graph when using G1GC also includes a pre-write subgraph
1848   // and an optional post-write subgraph. The same variations are
1849   // introduced as for CMS with conditional card marking i.e. the
1850   // StoreP/N is swapped for a CompareAndSwapP/N with a following
1851   // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder +
1852   // MemBarAcquire pair. There may be an extra If test introduced in
1853   // the CAS case, when the boolean result of the CAS is tested by the
1854   // caller. In that case an extra Region and AliasIdxBot Phi may be
1855   // introduced before the MergeMem
1856   //
1857   // So, the upshot is that in all cases the subgraph will include a
  // *normal* memory subgraph between the leading membar and its child
1859   // membar: either a normal volatile put graph including a releasing
1860   // StoreX and terminating with a trailing volatile membar or card
1861   // mark volatile membar; or a normal CAS graph including a
1862   // CompareAndSwapX + SCMemProj pair and terminating with a card mark
1863   // volatile membar or a trailing cpu order and acquire membar
1864   // pair. If the child membar is not a (volatile) card mark membar
1865   // then it marks the end of the volatile put or CAS subgraph. If the
1866   // child is a card mark membar then the normal subgraph will form
1867   // part of a larger volatile put or CAS subgraph if and only if the
1868   // child feeds an AliasIdxBot Mem feed to a trailing barrier via a
1869   // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4
1870   // Phi nodes merging the leading barrier memory flow (for G1).
1871   //
1872   // The predicates controlling generation of instructions for store
1873   // and barrier nodes employ a few simple helper functions (described
1874   // below) which identify the presence or absence of all these
1875   // subgraph configurations and provide a means of traversing from
1876   // one node in the subgraph to another.
1877 
1878   // is_CAS(int opcode)
1879   //
1880   // return true if opcode is one of the possible CompareAndSwapX
1881   // values otherwise false.
1882 
1883   bool is_CAS(int opcode)
1884   {
1885     switch(opcode) {
1886       // We handle these
1887     case Op_CompareAndSwapI:
1888     case Op_CompareAndSwapL:
1889     case Op_CompareAndSwapP:
1890     case Op_CompareAndSwapN:
1891  // case Op_CompareAndSwapB:
1892  // case Op_CompareAndSwapS:
1893       return true;
1894       // These are TBD
1895     case Op_WeakCompareAndSwapB:
1896     case Op_WeakCompareAndSwapS:
1897     case Op_WeakCompareAndSwapI:
1898     case Op_WeakCompareAndSwapL:
1899     case Op_WeakCompareAndSwapP:
1900     case Op_WeakCompareAndSwapN:
1901     case Op_CompareAndExchangeB:
1902     case Op_CompareAndExchangeS:
1903     case Op_CompareAndExchangeI:
1904     case Op_CompareAndExchangeL:
1905     case Op_CompareAndExchangeP:
1906     case Op_CompareAndExchangeN:
1907       return false;
1908     default:
1909       return false;
1910     }
1911   }
1912 
1913   // helper to determine the maximum number of Phi nodes we may need to
1914   // traverse when searching from a card mark membar for the merge mem
1915   // feeding a trailing membar or vice versa
1916 
1917   int max_phis()
1918   {
1919     if (UseG1GC) {
1920       return 4;
1921     } else if (UseConcMarkSweepGC && UseCondCardMark) {
1922       return 1;
1923     } else {
1924       return 0;
1925     }
1926   }
1927 
1928   // leading_to_normal
1929   //
1930   // graph traversal helper which detects the normal case Mem feed
1931   // from a release membar (or, optionally, its cpuorder child) to a
1932   // dependent volatile or acquire membar i.e. it ensures that one of
1933   // the following 3 Mem flow subgraphs is present.
1934   //
1935   //   MemBarRelease
1936   //  {MemBarCPUOrder} {leading}
1937   //          |  \      . . .
1938   //          |  StoreN/P[mo_release]  . . .
1939   //          |   /
1940   //         MergeMem
1941   //          |
1942   //  {MemBarCPUOrder}
1943   //   MemBarVolatile {trailing or card mark}
1944   //
1945   //   MemBarRelease
1946   //   MemBarCPUOrder {leading}
1947   //          |  \      . . .
1948   //          |  CompareAndSwapX  . . .
1949   //          |   /
1950   //         MergeMem
1951   //          |
1952   //   MemBarVolatile {card mark}
1953   //
1954   //   MemBarRelease
1955   //   MemBarCPUOrder {leading}
1956   //          |  \      . . .
1957   //          |  CompareAndSwapX  . . .
1958   //          |   /
1959   //         MergeMem
1960   //          |
1961   //   MemBarCPUOrder
1962   //   MemBarAcquire {trailing}
1963   //
1964   // if the correct configuration is present returns the trailing
1965   // or cardmark membar otherwise NULL.
1966   //
1967   // the input membar is expected to be either a cpuorder membar or a
1968   // release membar. in the latter case it should not have a cpu membar
1969   // child.
1970   //
1971   // the returned value may be a card mark or trailing membar
1972   //
1973 
1974   MemBarNode *leading_to_normal(MemBarNode *leading)
1975   {
1976     assert((leading->Opcode() == Op_MemBarRelease ||
1977             leading->Opcode() == Op_MemBarCPUOrder),
1978            "expecting a volatile or cpuroder membar!");
1979 
1980     // check the mem flow
1981     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1982 
1983     if (!mem) {
1984       return NULL;
1985     }
1986 
1987     Node *x = NULL;
1988     StoreNode * st = NULL;
1989     LoadStoreNode *cas = NULL;
1990     MergeMemNode *mm = NULL;
1991 
1992     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1993       x = mem->fast_out(i);
1994       if (x->is_MergeMem()) {
1995         if (mm != NULL) {
1996           return NULL;
1997         }
1998         // two merge mems is one too many
1999         mm = x->as_MergeMem();
2000       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2001         // two releasing stores/CAS nodes is one too many
2002         if (st != NULL || cas != NULL) {
2003           return NULL;
2004         }
2005         st = x->as_Store();
2006       } else if (is_CAS(x->Opcode())) {
2007         if (st != NULL || cas != NULL) {
2008           return NULL;
2009         }
2010         cas = x->as_LoadStore();
2011       }
2012     }
2013 
2014     // must have a store or a cas
2015     if (!st && !cas) {
2016       return NULL;
2017     }
2018 
2019     // must have a merge
2020     if (!mm) {
2021       return NULL;
2022     }
2023 
2024     Node *feed = NULL;
2025     if (cas) {
2026       // look for an SCMemProj
2027       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2028         x = cas->fast_out(i);
2029         if (x->Opcode() == Op_SCMemProj) {
2030           feed = x;
2031           break;
2032         }
2033       }
2034       if (feed == NULL) {
2035         return NULL;
2036       }
2037     } else {
2038       feed = st;
2039     }
2040     // ensure the feed node feeds the existing mergemem;
2041     for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2042       x = feed->fast_out(i);
2043       if (x == mm) {
2044         break;
2045       }
2046     }
2047     if (x != mm) {
2048       return NULL;
2049     }
2050 
2051     MemBarNode *mbar = NULL;
2052     // ensure the merge feeds to the expected type of membar
2053     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2054       x = mm->fast_out(i);
2055       if (x->is_MemBar()) {
2056         if (x->Opcode() == Op_MemBarCPUOrder) {
2057           // with a store any cpu order membar should precede a
2058           // trailing volatile membar. with a cas it should precede a
2059           // trailing acquire membar. in either case try to skip to
2060           // that next membar
2061           MemBarNode *y =  x->as_MemBar();
2062           y = child_membar(y);
2063           if (y != NULL) {
2064             // skip to this new membar to do the check
2065             x = y;
2066           }
2067           
2068         }
2069         if (x->Opcode() == Op_MemBarVolatile) {
2070           mbar = x->as_MemBar();
2071           // for a volatile store this can be either a trailing membar
2072           // or a card mark membar. for a cas it must be a card mark
2073           // membar
2074           guarantee(cas == NULL || is_card_mark_membar(mbar),
2075                     "in CAS graph volatile membar must be a card mark");
2076         } else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) {
2077           mbar = x->as_MemBar();
2078         }
2079         break;
2080       }
2081     }
2082 
2083     return mbar;
2084   }
2085 
2086   // normal_to_leading
2087   //
2088   // graph traversal helper which detects the normal case Mem feed
2089   // from either a card mark or a trailing membar to a preceding
2090   // release membar (optionally its cpuorder child) i.e. it ensures
2091   // that one of the following 3 Mem flow subgraphs is present.
2092   //
2093   //   MemBarRelease
2094   //  {MemBarCPUOrder} {leading}
2095   //          |  \      . . .
2096   //          |  StoreN/P[mo_release]  . . .
2097   //          |   /
2098   //         MergeMem
2099   //          |
2100   //  {MemBarCPUOrder}
2101   //   MemBarVolatile {trailing or card mark}
2102   //
2103   //   MemBarRelease
2104   //   MemBarCPUOrder {leading}
2105   //          |  \      . . .
2106   //          |  CompareAndSwapX  . . .
2107   //          |   /
2108   //         MergeMem
2109   //          |
2110   //   MemBarVolatile {card mark}
2111   //
2112   //   MemBarRelease
2113   //   MemBarCPUOrder {leading}
2114   //          |  \      . . .
2115   //          |  CompareAndSwapX  . . .
2116   //          |   /
2117   //         MergeMem
2118   //          |
2119   //   MemBarCPUOrder
2120   //   MemBarAcquire {trailing}
2121   //
2122   // this predicate checks for the same flow as the previous predicate
2123   // but starting from the bottom rather than the top.
2124   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
2127   //
2128   // n.b. the input membar is expected to be a MemBarVolatile but
2129   // need not be a card mark membar.
2130 
  // Walk the Mem flow up from a trailing or card mark membar and, if
  // one of the three normal subgraph shapes is present, return the
  // leading membar (the cpuorder membar when present, else the
  // release membar); otherwise return NULL.
  MemBarNode *normal_to_leading(const MemBarNode *barrier)
  {
    // input must be a volatile membar
    assert((barrier->Opcode() == Op_MemBarVolatile ||
            barrier->Opcode() == Op_MemBarAcquire),
           "expecting a volatile or an acquire membar");
    bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire;

    // if we have an intervening cpu order membar then start the
    // search from it

    Node *x = parent_membar(barrier);

    if (x == NULL) {
      // stick with the original barrier
      x = (Node *)barrier;
    } else if (x->Opcode() != Op_MemBarCPUOrder) {
      // any other barrier means this is not the graph we want
      return NULL;
    }

    // the Mem feed to the membar should be a merge
    x = x ->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // the merge should get its Bottom mem feed from the leading membar
    x = mm->in(Compile::AliasIdxBot);

    // ensure this is a non control projection
    if (!x->is_Proj() || x->is_CFG()) {
      return NULL;
    }
    // if it is fed by a membar that's the one we want
    x = x->in(0);

    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *leading = x->as_MemBar();
    // reject invalid candidates
    if (!leading_membar(leading)) {
      return NULL;
    }

    // ok, we have a leading membar, now for the sanity clauses

    // the leading membar must feed Mem to a releasing store or CAS;
    // scan its memory uses and reject the graph if we see more than
    // one such node
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
    StoreNode *st = NULL;
    LoadStoreNode *cas = NULL;
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two stores or CASes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // we must have found exactly one of a store or a cas
    if (st == NULL && cas == NULL) {
      // we have neither -- this is not a normal graph
      return NULL;
    }
    if (st == NULL) {
      // if we started from a volatile membar and found a CAS then the
      // original membar ought to be for a card mark
      guarantee((barrier_is_acquire || is_card_mark_membar(barrier)),
                "unexpected volatile barrier (i.e. not card mark) in CAS graph");
      // check that the CAS feeds the merge we used to get here via an
      // intermediary SCMemProj
      Node *scmemproj = NULL;
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->Opcode() == Op_SCMemProj) {
          scmemproj = x;
          break;
        }
      }
      if (scmemproj == NULL) {
        return NULL;
      }
      for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) {
        x = scmemproj->fast_out(i);
        if (x == mm) {
          return leading;
        }
      }
    } else {
      // we should not have found a store if we started from an acquire
      guarantee(!barrier_is_acquire,
                "unexpected trailing acquire barrier in volatile store graph");

      // the store should feed the merge we used to get here
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          return leading;
        }
      }
    }

    // store/CAS does not feed the merge -- not the graph we want
    return NULL;
  }
2245 
2246   // card_mark_to_trailing
2247   //
2248   // graph traversal helper which detects extra, non-normal Mem feed
2249   // from a card mark volatile membar to a trailing membar i.e. it
2250   // ensures that one of the following three GC post-write Mem flow
2251   // subgraphs is present.
2252   //
2253   // 1)
2254   //     . . .
2255   //       |
2256   //   MemBarVolatile (card mark)
2257   //      |          |
2258   //      |        StoreCM
2259   //      |          |
2260   //      |        . . .
2261   //  Bot |  /
2262   //   MergeMem
2263   //      |
2264   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2265   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2266   //                                 
2267   //
2268   // 2)
2269   //   MemBarRelease/CPUOrder (leading)
2270   //    |
2271   //    |
2272   //    |\       . . .
2273   //    | \        |
2274   //    |  \  MemBarVolatile (card mark)
2275   //    |   \   |     |
2276   //     \   \  |   StoreCM    . . .
2277   //      \   \ |
2278   //       \  Phi
2279   //        \ /
2280   //        Phi  . . .
2281   //     Bot |   /
2282   //       MergeMem
2283   //         |
2284   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2285   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2286   //
2287   // 3)
2288   //   MemBarRelease/CPUOrder (leading)
2289   //    |
2290   //    |\
2291   //    | \
2292   //    |  \      . . .
2293   //    |   \       |
2294   //    |\   \  MemBarVolatile (card mark)
2295   //    | \   \   |     |
2296   //    |  \   \  |   StoreCM    . . .
2297   //    |   \   \ |
2298   //     \   \  Phi
2299   //      \   \ /
2300   //       \  Phi
2301   //        \ /
2302   //        Phi  . . .
2303   //     Bot |   /
2304   //       MergeMem
2305   //         |
2306   //         |
2307   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2308   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2309   //
2310   // 4)
2311   //   MemBarRelease/CPUOrder (leading)
2312   //    |
2313   //    |\
2314   //    | \
2315   //    |  \
2316   //    |   \
2317   //    |\   \
2318   //    | \   \
2319   //    |  \   \        . . .
2320   //    |   \   \         |
2321   //    |\   \   \   MemBarVolatile (card mark)
2322   //    | \   \   \   /   |
2323   //    |  \   \   \ /  StoreCM    . . .
2324   //    |   \   \  Phi
2325   //     \   \   \ /
2326   //      \   \  Phi
2327   //       \   \ /
2328   //        \  Phi
2329   //         \ /
2330   //         Phi  . . .
2331   //      Bot |   /
2332   //       MergeMem
2333   //          |
2334   //          |
2335   //    MemBarCPUOrder
2336   //    MemBarAcquire {trailing}
2337   //
2338   // configuration 1 is only valid if UseConcMarkSweepGC &&
2339   // UseCondCardMark
2340   //
2341   // configuration 2, is only valid if UseConcMarkSweepGC &&
2342   // UseCondCardMark or if UseG1GC
2343   //
2344   // configurations 3 and 4 are only valid if UseG1GC.
2345   //
2346   // if a valid configuration is present returns the trailing membar
2347   // otherwise NULL.
2348   //
2349   // n.b. the supplied membar is expected to be a card mark
2350   // MemBarVolatile i.e. the caller must ensure the input node has the
2351   // correct operand and feeds Mem to a StoreCM node
2352 
2353   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2354   {
2355     // input must be a card mark volatile membar
2356     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2357 
2358     Node *feed = barrier->proj_out(TypeFunc::Memory);
2359     Node *x;
2360     MergeMemNode *mm = NULL;
2361 
2362     const int MAX_PHIS = max_phis(); // max phis we will search through
2363     int phicount = 0;                // current search count
2364 
2365     bool retry_feed = true;
2366     while (retry_feed) {
2367       // see if we have a direct MergeMem feed
2368       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2369         x = feed->fast_out(i);
2370         // the correct Phi will be merging a Bot memory slice
2371         if (x->is_MergeMem()) {
2372           mm = x->as_MergeMem();
2373           break;
2374         }
2375       }
2376       if (mm) {
2377         retry_feed = false;
2378       } else if (phicount++ < MAX_PHIS) {
2379         // the barrier may feed indirectly via one or two Phi nodes
2380         PhiNode *phi = NULL;
2381         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2382           x = feed->fast_out(i);
2383           // the correct Phi will be merging a Bot memory slice
2384           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2385             phi = x->as_Phi();
2386             break;
2387           }
2388         }
2389         if (!phi) {
2390           return NULL;
2391         }
2392         // look for another merge below this phi
2393         feed = phi;
2394       } else {
2395         // couldn't find a merge
2396         return NULL;
2397       }
2398     }
2399 
2400     // sanity check this feed turns up as the expected slice
2401     guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2402 
2403     MemBarNode *trailing = NULL;
2404     // be sure we have a trailing membar fed by the merge
2405     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2406       x = mm->fast_out(i);
2407       if (x->is_MemBar()) {
2408         // if this is an intervening cpu order membar skip to the
2409         // following membar
2410         if (x->Opcode() == Op_MemBarCPUOrder) {
2411           MemBarNode *y =  x->as_MemBar();
2412           y = child_membar(y);
2413           if (y != NULL) {
2414             x = y;
2415           }
2416         }
2417         if (x->Opcode() == Op_MemBarVolatile ||
2418             x->Opcode() == Op_MemBarAcquire) {
2419           trailing = x->as_MemBar();
2420         }
2421         break;
2422       }
2423     }
2424 
2425     return trailing;
2426   }
2427 
2428   // trailing_to_card_mark
2429   //
2430   // graph traversal helper which detects extra, non-normal Mem feed
2431   // from a trailing volatile membar to a preceding card mark volatile
2432   // membar i.e. it identifies whether one of the three possible extra
2433   // GC post-write Mem flow subgraphs is present
2434   //
2435   // this predicate checks for the same flow as the previous predicate
2436   // but starting from the bottom rather than the top.
2437   //
2438   // if the configuration is present returns the card mark membar
2439   // otherwise NULL
2440   //
2441   // n.b. the supplied membar is expected to be a trailing
2442   // MemBarVolatile or MemBarAcquire i.e. the caller must ensure the
2443   // input node has the correct opcode
2444 
  // Walk the Mem flow up from a trailing (non-card mark) membar,
  // through the MergeMem and at most max_phis() Bottom-slice Phi
  // nodes, back to the card mark membar. Returns the card mark
  // membar when one of the post-write configurations is present,
  // otherwise NULL.
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile ||
           trailing->Opcode() == Op_MemBarAcquire,
           "expecting a volatile or acquire membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    Node *x = (Node *)trailing;

    // look for a preceding cpu order membar
    MemBarNode *y = parent_membar(x->as_MemBar());
    if (y != NULL) {
      // make sure it is a cpu order membar
      if (y->Opcode() != Op_MemBarCPUOrder) {
        // this is not the graph we were looking for
        return NULL;
      }
      // start the search from here
      x = y;
    }

    // the Mem feed to the membar should be a merge
    x = x->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = max_phis(); // max phis we will search through
    int phicount = 0;                    // current search count

    // if the Bot slice is already a Proj we can skip the Phi search
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // inspect the phi's inputs (skipping the control input at 0):
        // we expect either a Memory Proj from a membar or another
        // Bottom-slice Phi to chase
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              // candidate card mark membar feed
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // not a Phi, or too many Phis -- wrong config
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2536 
2537   // trailing_to_leading
2538   //
2539   // graph traversal helper which checks the Mem flow up the graph
2540   // from a (non-card mark) trailing membar attempting to locate and
2541   // return an associated leading membar. it first looks for a
2542   // subgraph in the normal configuration (relying on helper
2543   // normal_to_leading). failing that it then looks for one of the
2544   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2546   // trailing_to_card_mark), and then checks that the card mark membar
2547   // is fed by a leading membar (once again relying on auxiliary
2548   // predicate normal_to_leading).
2549   //
  // if the configuration is valid returns the cpuorder membar for
2551   // preference or when absent the release membar otherwise NULL.
2552   //
2553   // n.b. the input membar is expected to be either a volatile or
2554   // acquire membar but in the former case must *not* be a card mark
2555   // membar.
2556 
2557   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2558   {
2559     assert((trailing->Opcode() == Op_MemBarAcquire ||
2560             trailing->Opcode() == Op_MemBarVolatile),
2561            "expecting an acquire or volatile membar");
2562     assert((trailing->Opcode() != Op_MemBarVolatile ||
2563             !is_card_mark_membar(trailing)),
2564            "not expecting a card mark membar");
2565 
2566     MemBarNode *leading = normal_to_leading(trailing);
2567 
2568     if (leading) {
2569       return leading;
2570     }
2571 
2572     // there is no normal path from trailing to leading membar. see if
2573     // we can arrive via a card mark membar
2574 
2575     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2576 
2577     if (!card_mark_membar) {
2578       return NULL;
2579     }
2580 
2581     return normal_to_leading(card_mark_membar);
2582   }
2583 
2584   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2585 
2586 bool unnecessary_acquire(const Node *barrier)
2587 {
2588   assert(barrier->is_MemBar(), "expecting a membar");
2589 
2590   if (UseBarriersForVolatile) {
2591     // we need to plant a dmb
2592     return false;
2593   }
2594 
2595   // a volatile read derived from bytecode (or also from an inlined
2596   // SHA field read via LibraryCallKit::load_field_from_object)
2597   // manifests as a LoadX[mo_acquire] followed by an acquire membar
2598   // with a bogus read dependency on it's preceding load. so in those
2599   // cases we will find the load node at the PARMS offset of the
2600   // acquire membar.  n.b. there may be an intervening DecodeN node.
2601 
2602   Node *x = barrier->lookup(TypeFunc::Parms);
2603   if (x) {
2604     // we are starting from an acquire and it has a fake dependency
2605     //
2606     // need to check for
2607     //
2608     //   LoadX[mo_acquire]
2609     //   {  |1   }
2610     //   {DecodeN}
2611     //      |Parms
2612     //   MemBarAcquire*
2613     //
2614     // where * tags node we were passed
2615     // and |k means input k
2616     if (x->is_DecodeNarrowPtr()) {
2617       x = x->in(1);
2618     }
2619 
2620     return (x->is_Load() && x->as_Load()->is_acquire());
2621   }
2622 
2623   // other option for unnecessary membar is that it is a trailing node
2624   // belonging to a CAS
2625 
2626   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2627 
2628   return leading != NULL;
2629 }
2630 
bool needs_acquiring_load(const Node *n)
{
  // Decide whether this load should be emitted as ldar<x> rather than
  // a plain ldr<x>: true only when the load is marked mo_acquire and
  // actually feeds a MemBarAcquire (possibly through a DecodeN).
  assert(n->is_Load(), "expecting a load");
  if (UseBarriersForVolatile) {
    // we use a normal load and a dmb
    return false;
  }

  LoadNode *ld = n->as_Load();

  if (!ld->is_acquire()) {
    return false;
  }

  // check if this load is feeding an acquire membar
  //
  //   LoadX[mo_acquire]
  //   {  |1   }
  //   {DecodeN}
  //      |Parms
  //   MemBarAcquire*
  //
  // where * tags node we were passed
  // and |k means input k

  Node *start = ld;
  Node *mbacq = NULL;

  // if we hit a DecodeNarrowPtr we reset the start node and restart
  // the search through the outputs
 restart:

  for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
    Node *x = start->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
      mbacq = x;
    } else if (!mbacq &&
               (x->is_DecodeNarrowPtr() ||
                (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
      // abandon the current iteration and rescan from the decode
      // node's outputs; safe because no acquire membar was seen yet
      start = x;
      goto restart;
    }
  }

  if (mbacq) {
    return true;
  }

  return false;
}
2681 
2682 bool unnecessary_release(const Node *n)
2683 {
2684   assert((n->is_MemBar() &&
2685           n->Opcode() == Op_MemBarRelease),
2686          "expecting a release membar");
2687 
2688   if (UseBarriersForVolatile) {
2689     // we need to plant a dmb
2690     return false;
2691   }
2692 
2693   // if there is a dependent CPUOrder barrier then use that as the
2694   // leading
2695 
2696   MemBarNode *barrier = n->as_MemBar();
2697   // check for an intervening cpuorder membar
2698   MemBarNode *b = child_membar(barrier);
2699   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2700     // ok, so start the check from the dependent cpuorder barrier
2701     barrier = b;
2702   }
2703 
2704   // must start with a normal feed
2705   MemBarNode *child_barrier = leading_to_normal(barrier);
2706 
2707   if (!child_barrier) {
2708     return false;
2709   }
2710 
2711   if (!is_card_mark_membar(child_barrier)) {
2712     // this is the trailing membar and we are done
2713     return true;
2714   }
2715 
2716   // must be sure this card mark feeds a trailing membar
2717   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2718   return (trailing != NULL);
2719 }
2720 
2721 bool unnecessary_volatile(const Node *n)
2722 {
2723   // assert n->is_MemBar();
2724   if (UseBarriersForVolatile) {
2725     // we need to plant a dmb
2726     return false;
2727   }
2728 
2729   MemBarNode *mbvol = n->as_MemBar();
2730 
2731   // first we check if this is part of a card mark. if so then we have
2732   // to generate a StoreLoad barrier
2733 
2734   if (is_card_mark_membar(mbvol)) {
2735       return false;
2736   }
2737 
2738   // ok, if it's not a card mark then we still need to check if it is
2739   // a trailing membar of a volatile put graph.
2740 
2741   return (trailing_to_leading(mbvol) != NULL);
2742 }
2743 
2744 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2745 
2746 bool needs_releasing_store(const Node *n)
2747 {
2748   // assert n->is_Store();
2749   if (UseBarriersForVolatile) {
2750     // we use a normal store and dmb combination
2751     return false;
2752   }
2753 
2754   StoreNode *st = n->as_Store();
2755 
2756   // the store must be marked as releasing
2757   if (!st->is_release()) {
2758     return false;
2759   }
2760 
2761   // the store must be fed by a membar
2762 
2763   Node *x = st->lookup(StoreNode::Memory);
2764 
2765   if (! x || !x->is_Proj()) {
2766     return false;
2767   }
2768 
2769   ProjNode *proj = x->as_Proj();
2770 
2771   x = proj->lookup(0);
2772 
2773   if (!x || !x->is_MemBar()) {
2774     return false;
2775   }
2776 
2777   MemBarNode *barrier = x->as_MemBar();
2778 
2779   // if the barrier is a release membar or a cpuorder mmebar fed by a
2780   // release membar then we need to check whether that forms part of a
2781   // volatile put graph.
2782 
2783   // reject invalid candidates
2784   if (!leading_membar(barrier)) {
2785     return false;
2786   }
2787 
2788   // does this lead a normal subgraph?
2789   MemBarNode *mbvol = leading_to_normal(barrier);
2790 
2791   if (!mbvol) {
2792     return false;
2793   }
2794 
2795   // all done unless this is a card mark
2796   if (!is_card_mark_membar(mbvol)) {
2797     return true;
2798   }
2799 
2800   // we found a card mark -- just make sure we have a trailing barrier
2801 
2802   return (card_mark_to_trailing(mbvol) != NULL);
2803 }
2804 
2805 // predicate controlling translation of CAS
2806 //
2807 // returns true if CAS needs to use an acquiring load otherwise false
2808 
bool needs_acquiring_load_exclusive(const Node *n)
{
  // CAS is always translated with acquire semantics unless we are
  // using explicit barriers; the ASSERT block merely validates that
  // the CAS sits inside the expected membar subgraph.
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  guarantee(barrier->Opcode() == Op_MemBarCPUOrder,
            "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  guarantee(mbar != NULL, "CAS not embedded in normal graph!");

  // if this is a card mark membar check we have a trailing acquire

  if (is_card_mark_membar(mbar)) {
    mbar = card_mark_to_trailing(mbar);
  }

  guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!");

  guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2861 
2862 // predicate controlling translation of StoreCM
2863 //
2864 // returns true if a StoreStore must precede the card write otherwise
2865 // false
2866 
2867 bool unnecessary_storestore(const Node *storecm)
2868 {
2869   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2870 
2871   // we only ever need to generate a dmb ishst between an object put
2872   // and the associated card mark when we are using CMS without
2873   // conditional card marking
2874 
2875   if (!UseConcMarkSweepGC || UseCondCardMark) {
2876     return true;
2877   }
2878 
2879   // if we are implementing volatile puts using barriers then the
2880   // object put is an str so we must insert the dmb ishst
2881 
2882   if (UseBarriersForVolatile) {
2883     return false;
2884   }
2885 
2886   // we can omit the dmb ishst if this StoreCM is part of a volatile
2887   // put because in thta case the put will be implemented by stlr
2888   //
2889   // we need to check for a normal subgraph feeding this StoreCM.
2890   // that means the StoreCM must be fed Memory from a leading membar,
2891   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2892   // leading membar must be part of a normal subgraph
2893 
2894   Node *x = storecm->in(StoreNode::Memory);
2895 
2896   if (!x->is_Proj()) {
2897     return false;
2898   }
2899 
2900   x = x->in(0);
2901 
2902   if (!x->is_MemBar()) {
2903     return false;
2904   }
2905 
2906   MemBarNode *leading = x->as_MemBar();
2907 
2908   // reject invalid candidates
2909   if (!leading_membar(leading)) {
2910     return false;
2911   }
2912 
2913   // we can omit the StoreStore if it is the head of a normal subgraph
2914   return (leading_to_normal(leading) != NULL);
2915 }
2916 
2917 
2918 #define __ _masm.
2919 
2920 // advance declarations for helper functions to convert register
2921 // indices to register objects
2922 
2923 // the ad file has to provide implementations of certain methods
2924 // expected by the generic code
2925 //
2926 // REQUIRED FUNCTIONALITY
2927 
2928 //=============================================================================
2929 
2930 // !!!!! Special hack to get all types of calls to specify the byte offset
2931 //       from the start of the call to the point where the return address
2932 //       will point.
2933 
2934 int MachCallStaticJavaNode::ret_addr_offset()
2935 {
2936   // call should be a simple bl
2937   int off = 4;
2938   return off;
2939 }
2940 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four 4-byte instructions: movz, movk, movk, bl
  return 16;
}
2945 
2946 int MachCallRuntimeNode::ret_addr_offset() {
2947   // for generated stubs the call will be
2948   //   far_call(addr)
2949   // for real runtime callouts it will be six instructions
2950   // see aarch64_enc_java_to_runtime
2951   //   adr(rscratch2, retaddr)
2952   //   lea(rscratch1, RuntimeAddress(addr)
2953   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2954   //   blrt rscratch1
2955   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2956   if (cb) {
2957     return MacroAssembler::far_branch_size();
2958   } else {
2959     return 6 * NativeInstruction::instruction_size;
2960   }
2961 }
2962 
2963 // Indicate if the safepoint node needs the polling page as an input
2964 
2965 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2967 // instruction itself. so we cannot plant a mov of the safepoint poll
2968 // address followed by a load. setting this to true means the mov is
2969 // scheduled as a prior instruction. that's better for scheduling
2970 // anyway.
2971 
bool SafePointNode::needs_polling_address_input()
{
  // true: the mov of the poll address is scheduled as a separate
  // prior instruction (see the comment above)
  return true;
}
2976 
2977 //=============================================================================
2978 
2979 #ifndef PRODUCT
// debug listing for the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2983 #endif
2984 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // emit a single brk #0 instruction to trap to the debugger
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2989 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // variable size; measure the emitted code
  return MachNode::size(ra_);
}
2993 
2994 //=============================================================================
2995 
2996 #ifndef PRODUCT
  // debug listing for padding nops
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
3000 #endif
3001 
3002   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
3003     MacroAssembler _masm(&cbuf);
3004     for (int i = 0; i < _count; i++) {
3005       __ nop();
3006     }
3007   }
3008 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // one fixed-size instruction per requested nop
    return _count * NativeInstruction::instruction_size;
  }
3012 
3013 //=============================================================================
3014 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
3015 
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // the table is addressed absolutely, so no base offset is needed
}
3019 
3020 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called: requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
3024 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding -- this node emits no instructions
}
3028 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // nothing is emitted (see emit above), so the size is zero
  return 0;
}
3032 
3033 #ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  // debug listing; nothing is emitted for this node
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
3037 #endif
3038 
3039 #ifndef PRODUCT
3040 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3041   Compile* C = ra_->C;
3042 
3043   int framesize = C->frame_slots() << LogBytesPerInt;
3044 
3045   if (C->need_stack_bang(framesize))
3046     st->print("# stack bang size=%d\n\t", framesize);
3047 
3048   if (framesize < ((1 << 9) + 2 * wordSize)) {
3049     st->print("sub  sp, sp, #%d\n\t", framesize);
3050     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
3051     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
3052   } else {
3053     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
3054     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
3055     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3056     st->print("sub  sp, sp, rscratch1");
3057   }
3058 }
3059 #endif
3060 
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // Emit the method prolog: patchable nop, optional stack bang, frame
  // build, simulator notification and constant table base setup.
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // mark the point at which the frame is fully built
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3096 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  // the prolog size varies with too many factors to predict; measure
  // the emitted code instead
  return MachNode::size(ra_);
}
3102 
int MachPrologNode::reloc() const
{
  return 0;  // the prolog contains no relocatable values
}
3107 
3108 //=============================================================================
3109 
3110 #ifndef PRODUCT
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // pseudo-assembly listing of the frame teardown and return poll
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // the frame is just the saved rfp/lr pair
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: restore the pair, then a single immediate add
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore sp via rscratch1, then pop the pair
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
3134 #endif
3135 
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // Emit the method epilog: frame removal, simulator notification,
  // reserved stack check and return-point safepoint poll.
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // read the polling page to poll for a safepoint at method return
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3155 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // variable size; measure the emitted code
  return MachNode::size(ra_);
}
3160 
int MachEpilogNode::reloc() const {
  // number of relocatable values in the epilog: just the polling page
  // address
  return 1;
}
3165 
const Pipeline * MachEpilogNode::pipeline() const {
  // use the generic pipeline description
  return MachNode::pipeline_class();
}
3169 
3170 // This method seems to be obsolete. It is declared in machnode.hpp
3171 // and defined in all *.ad files, but it is never called. Should we
3172 // get rid of it?
int MachEpilogNode::safepoint_offset() const {
  // see the note above: apparently obsolete and never called
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3177 
3178 //=============================================================================
3179 
3180 // Figure out which register class each belongs in: rc_int, rc_float or
3181 // rc_stack.
3182 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3183 
3184 static enum RC rc_class(OptoReg::Name reg) {
3185 
3186   if (reg == OptoReg::Bad) {
3187     return rc_bad;
3188   }
3189 
3190   // we have 30 int registers * 2 halves
3191   // (rscratch1 and rscratch2 are omitted)
3192 
3193   if (reg < 60) {
3194     return rc_int;
3195   }
3196 
3197   // we have 32 float register * 2 halves
3198   if (reg < 60 + 128) {
3199     return rc_float;
3200   }
3201 
3202   // Between float regs & stack is the flags regs.
3203   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3204 
3205   return rc_stack;
3206 }
3207 
3208 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3209   Compile* C = ra_->C;
3210 
3211   // Get registers to move.
3212   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3213   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3214   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3215   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3216 
3217   enum RC src_hi_rc = rc_class(src_hi);
3218   enum RC src_lo_rc = rc_class(src_lo);
3219   enum RC dst_hi_rc = rc_class(dst_hi);
3220   enum RC dst_lo_rc = rc_class(dst_lo);
3221 
3222   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3223 
3224   if (src_hi != OptoReg::Bad) {
3225     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3226            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3227            "expected aligned-adjacent pairs");
3228   }
3229 
3230   if (src_lo == dst_lo && src_hi == dst_hi) {
3231     return 0;            // Self copy, no move.
3232   }
3233 
3234   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3235               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3236   int src_offset = ra_->reg2offset(src_lo);
3237   int dst_offset = ra_->reg2offset(dst_lo);
3238 
3239   if (bottom_type()->isa_vect() != NULL) {
3240     uint ireg = ideal_reg();
3241     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3242     if (cbuf) {
3243       MacroAssembler _masm(cbuf);
3244       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3245       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3246         // stack->stack
3247         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3248         if (ireg == Op_VecD) {
3249           __ unspill(rscratch1, true, src_offset);
3250           __ spill(rscratch1, true, dst_offset);
3251         } else {
3252           __ spill_copy128(src_offset, dst_offset);
3253         }
3254       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3255         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3256                ireg == Op_VecD ? __ T8B : __ T16B,
3257                as_FloatRegister(Matcher::_regEncode[src_lo]));
3258       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3259         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3260                        ireg == Op_VecD ? __ D : __ Q,
3261                        ra_->reg2offset(dst_lo));
3262       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3263         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3264                        ireg == Op_VecD ? __ D : __ Q,
3265                        ra_->reg2offset(src_lo));
3266       } else {
3267         ShouldNotReachHere();
3268       }
3269     }
3270   } else if (cbuf) {
3271     MacroAssembler _masm(cbuf);
3272     switch (src_lo_rc) {
3273     case rc_int:
3274       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3275         if (is64) {
3276             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3277                    as_Register(Matcher::_regEncode[src_lo]));
3278         } else {
3279             MacroAssembler _masm(cbuf);
3280             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3281                     as_Register(Matcher::_regEncode[src_lo]));
3282         }
3283       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3284         if (is64) {
3285             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3286                      as_Register(Matcher::_regEncode[src_lo]));
3287         } else {
3288             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3289                      as_Register(Matcher::_regEncode[src_lo]));
3290         }
3291       } else {                    // gpr --> stack spill
3292         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3293         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3294       }
3295       break;
3296     case rc_float:
3297       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3298         if (is64) {
3299             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3300                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3301         } else {
3302             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3303                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3304         }
3305       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3306           if (cbuf) {
3307             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3308                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3309         } else {
3310             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3311                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3312         }
3313       } else {                    // fpr --> stack spill
3314         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3315         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3316                  is64 ? __ D : __ S, dst_offset);
3317       }
3318       break;
3319     case rc_stack:
3320       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3321         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3322       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3323         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3324                    is64 ? __ D : __ S, src_offset);
3325       } else {                    // stack --> stack copy
3326         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3327         __ unspill(rscratch1, is64, src_offset);
3328         __ spill(rscratch1, is64, dst_offset);
3329       }
3330       break;
3331     default:
3332       assert(false, "bad rc_class for spill");
3333       ShouldNotReachHere();
3334     }
3335   }
3336 
3337   if (st) {
3338     st->print("spill ");
3339     if (src_lo_rc == rc_stack) {
3340       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3341     } else {
3342       st->print("%s -> ", Matcher::regName[src_lo]);
3343     }
3344     if (dst_lo_rc == rc_stack) {
3345       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3346     } else {
3347       st->print("%s", Matcher::regName[dst_lo]);
3348     }
3349     if (bottom_type()->isa_vect() != NULL) {
3350       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3351     } else {
3352       st->print("\t# spill size = %d", is64 ? 64:32);
3353     }
3354   }
3355 
3356   return 0;
3357 
3358 }
3359 
3360 #ifndef PRODUCT
3361 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3362   if (!ra_)
3363     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3364   else
3365     implementation(NULL, ra_, false, st);
3366 }
3367 #endif
3368 
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // delegate to implementation() in emit-only mode (cbuf set, no stream)
  implementation(&cbuf, ra_, false, NULL);
}
3372 
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable size; measure the emitted code
  return MachNode::size(ra_);
}
3376 
3377 //=============================================================================
3378 
3379 #ifndef PRODUCT
3380 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3381   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3382   int reg = ra_->get_reg_first(this);
3383   st->print("add %s, rsp, #%d]\t# box lock",
3384             Matcher::regName[reg], offset);
3385 }
3386 #endif
3387 
3388 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3389   MacroAssembler _masm(&cbuf);
3390 
3391   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3392   int reg    = ra_->get_encode(this);
3393 
3394   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3395     __ add(as_Register(reg), sp, offset);
3396   } else {
3397     ShouldNotReachHere();
3398   }
3399 }
3400 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() produces exactly one 4-byte add instruction.
  return 4;
}
3405 
3406 //=============================================================================
3407 
3408 #ifndef PRODUCT
// debug listing of the unverified entry point's inline cache check
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    // load and, when shifted, decode the receiver's compressed klass
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  // NOTE(review): the printed addresses lack an opening '[' and the
  // non-compressed branch still says "compressed klass" -- cosmetic
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
3423 #endif
3424 
// Emit the unverified entry point: load the receiver's klass and compare
// it with the inline cache's expected klass; on mismatch, jump to the
// shared IC-miss stub.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compares the klass of the receiver (j_rarg0) against rscratch2,
  // using rscratch1 as a temporary; sets condition flags.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: go resolve/patch the inline cache.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3438 
// Code size of the unverified entry point; computed generically from the
// emitted instruction stream rather than hard-coded.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3443 
3444 // REQUIRED EMIT CODE
3445 
3446 //=============================================================================
3447 
3448 // Emit exception handler code.
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 when the
// code cache is exhausted (compilation is then bailed out).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // Could not reserve stub space: record a bailout, don't crash.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  // The handler must fit in the space reserved by size_exception_handler().
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3467 
3468 // Emit deopt handler code.
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 when the
// code cache is exhausted (compilation is then bailed out).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the address of this handler; presumably the unpack blob
  // uses it to identify the deopt site -- see SharedRuntime::deopt_blob.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The handler must fit in the space reserved by size_deopt_handler().
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3488 
3489 // REQUIRED MATCHER CODE
3490 
3491 //=============================================================================
3492 
3493 const bool Matcher::match_rule_supported(int opcode) {
3494 
3495   switch (opcode) {
3496   default:
3497     break;
3498   }
3499 
3500   if (!has_match_rule(opcode)) {
3501     return false;
3502   }
3503 
3504   return true;  // Per default match rules are supported.
3505 }
3506 
3507 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3508 
3509   // TODO
3510   // identify extra cases that we might want to provide match rules for
3511   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3512   bool ret_value = match_rule_supported(opcode);
3513   // Add rules here.
3514 
3515   return ret_value;  // Per default match rules are supported.
3516 }
3517 
// No predicated (masked) vector support is advertised.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// No platform adjustment to the float register-pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on this platform.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3531 
3532 // Is this branch offset short enough that a short branch can be used?
3533 //
3534 // NOTE: If the platform does not provide any short branch variants, then
3535 //       this method should return false for offset 0.
3536 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3537   // The passed offset is relative to address of the branch.
3538 
3539   return (-32768 <= offset && offset < 32768);
3540 }
3541 
// Is a single 64-bit constant store cheap enough to prefer over two
// 32-bit ones?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3552 
3553 // Vector width in bytes.
3554 const int Matcher::vector_width_in_bytes(BasicType bt) {
3555   int size = MIN2(16,(int)MaxVectorSize);
3556   // Minimum 2 values in vector
3557   if (size < 2*type2aelembytes(bt)) size = 0;
3558   // But never < 4
3559   if (size < 4) size = 0;
3560   return size;
3561 }
3562 
3563 // Limits on vector size (number of elements) loaded into vector.
3564 const int Matcher::max_vector_size(const BasicType bt) {
3565   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3566 }
3567 const int Matcher::min_vector_size(const BasicType bt) {
3568 //  For the moment limit the vector size to 8 bytes
3569     int size = 8 / type2aelembytes(bt);
3570     if (size < 2) size = 2;
3571     return size;
3572 }
3573 
// Vector ideal reg.
const uint Matcher::vector_ideal_reg(int len) {
  switch(len) {
    case  8: return Op_VecD;   // 8-byte vector
    case 16: return Op_VecX;   // 16-byte vector
  }
  // Only 8- and 16-byte vectors are supported (see vector_width_in_bytes).
  ShouldNotReachHere();
  return 0;
}

// Shift counts are always held in a full 16-byte vector register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3587 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Misaligned vector store/load is allowed unless -XX:+AlignVector
// forces alignment.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}

// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
3618 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only worthwhile when no shift is needed to decode a narrow oop.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
3648 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not needed on this platform.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3680 
3681 // Return whether or not this register is ever used as an argument.
3682 // This function is used on startup to build the trampoline stubs in
3683 // generateOptoStub.  Registers not mentioned will be killed by the VM
3684 // call in the trampoline, and arguments in those registers not be
3685 // available to the callee.
3686 bool Matcher::can_be_java_arg(int reg)
3687 {
3688   return
3689     reg ==  R0_num || reg == R0_H_num ||
3690     reg ==  R1_num || reg == R1_H_num ||
3691     reg ==  R2_num || reg == R2_H_num ||
3692     reg ==  R3_num || reg == R3_H_num ||
3693     reg ==  R4_num || reg == R4_H_num ||
3694     reg ==  R5_num || reg == R5_H_num ||
3695     reg ==  R6_num || reg == R6_H_num ||
3696     reg ==  R7_num || reg == R7_H_num ||
3697     reg ==  V0_num || reg == V0_H_num ||
3698     reg ==  V1_num || reg == V1_H_num ||
3699     reg ==  V2_num || reg == V2_H_num ||
3700     reg ==  V3_num || reg == V3_H_num ||
3701     reg ==  V4_num || reg == V4_H_num ||
3702     reg ==  V5_num || reg == V5_H_num ||
3703     reg ==  V6_num || reg == V6_H_num ||
3704     reg ==  V7_num || reg == V7_H_num;
3705 }
3706 
// Any register that can carry a Java argument may also hold a spilled one.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Use the generic expansion for long division by constant, not hand asm.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3715 
// Register for DIVI projection of divmodI.
// No combined div/mod node is matched on this platform, so none of the
// four projection masks below should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Saved SP for method-handle invokes is kept in the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3742 
3743 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3744   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3745     Node* u = addp->fast_out(i);
3746     if (u->is_Mem()) {
3747       int opsize = u->as_Mem()->memory_size();
3748       assert(opsize > 0, "unexpected memory operand size");
3749       if (u->as_Mem()->memory_size() != (1<<shift)) {
3750         return false;
3751       }
3752     }
3753   }
3754   return true;
3755 }
3756 
// ConvI2L does not require a specific input type to be matched.
const bool Matcher::convi2l_type_required = false;
3758 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple (base + constant offset) addresses are handled generically.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (ConvI2L idx) << con -- a scaled, possibly
  // sign-extended index.  Clone it into the address so it can be folded
  // into a scaled-index addressing mode, but only when every memory user
  // has an operand size matching the shift amount (see
  // size_fits_all_mem_uses).
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    // NOTE(review): m uses test_set while off/conv use set above --
    // presumably intentional (m may already be flagged); verify.
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Case 2: offset is an unscaled sign-extended 32-bit index.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3799 
// No platform-specific address-expression reshaping is needed here.
void Compile::reshape_address(AddPNode* addp) {
}
3802 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count general-purpose vs floating-point arguments.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break -- an FP argument falls through and also
      // bumps the general-purpose count.  Looks suspicious; confirm
      // against the simulator's blrt argument convention before changing.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Map the Java return type onto the simulator return-type codes.
  // (The default label sits before two of the cases; unusual but legal,
  // since switch cases are just labels.)
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3844 
// Expand a volatile load/store.  Volatile accesses use acquire/release
// instructions that accept only a plain base register, so any index,
// scale, or displacement is rejected with a guarantee().
//
// Deliberately not wrapped in do { } while (0): the _masm it declares
// must remain in scope so the __ macro works in code following the
// macro invocation (see the ldarsb/ldarsh enc_classes below).
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3853 
// Member-function-pointer types for the loadStore helpers below:
// scalar GP-register, scalar FP-register, and SIMD/vector emitters.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3858 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // 32-bit index: sign-extend (sxtw) while applying the scale.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: base + displacement form.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3889 
  // FP-register variant of the non-volatile loadStore helper above.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Only the scaled-index opcodes need a sign-extended 32-bit index here.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3912 
  // SIMD/vector variant: the index, when present, is always scaled with
  // a plain left shift (no sign extension).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3924 
3925 %}
3926 
3927 
3928 
3929 //----------ENCODING BLOCK-----------------------------------------------------
3930 // This block specifies the encoding classes used by the compiler to
3931 // output byte streams.  Encoding classes are parameterized macros
3932 // used by Machine Instruction Nodes in order to generate the bit
3933 // encoding of the instruction.  Operands specify their base encoding
3934 // interface with the interface keyword.  There are currently
3935 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3936 // COND_INTER.  REG_INTER causes an operand to generate a function
3937 // which returns its register number when queried.  CONST_INTER causes
3938 // an operand to generate a function which returns the value of the
3939 // constant when queried.  MEMORY_INTER causes an operand to generate
3940 // four functions which return the Base Register, the Index Register,
3941 // the Scale Value, and the Offset Value of the operand when queried.
3942 // COND_INTER causes an operand to generate six functions which return
3943 // the encoding code (ie - encoding bits for the instruction)
3944 // associated with each basic boolean condition for a conditional
3945 // instruction.
3946 //
3947 // Instructions specify two basic values for encoding.  Again, a
3948 // function is available to check if the constant displacement is an
3949 // oop. They use the ins_encode keyword to specify their encoding
3950 // classes (which must be a sequence of enc_class names, and their
3951 // parameters, specified in the encoding block), and they use the
3952 // opcode keyword to specify, in order, their primary, secondary, and
3953 // tertiary opcode.  Only the opcode sections which a particular
3954 // instruction needs for encoding need to be specified.
3955 encode %{
3956   // Build emit functions for each basic byte or larger field in the
3957   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3958   // from C++ code in the enc_class source block.  Emit functions will
3959   // live in the main source block for now.  In future, we can
3960   // generalize this by adding a syntax that specifies the sizes of
3961   // fields in an order, so that the adlc can build the emit functions
3962   // automagically
3963 
  // catch all for unimplemented encodings: emits a trap so an
  // accidentally-unencoded instruction fails loudly at run time.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3969 
  // BEGIN Non-volatile memory access
  //
  // Each enc_class below simply forwards to the appropriate loadStore
  // helper, passing $mem->opcode() so the helper can pick the right
  // index-extension mode.  Same-named classes differ only in the ideal
  // register type of the destination operand.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP loads.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: S = 4, D = 8, Q = 16 bytes.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4073 
  // Non-volatile stores.  The *0 variants store the zero register (zr)
  // directly instead of consuming a source operand.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // Copy sp through rscratch2, since str cannot encode sp as source.
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP stores.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S = 4, D = 8, Q = 16 bytes.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4166 
4167   // END Non-volatile memory access
4168 
4169   // volatile loads and stores
4170 
4171   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4172     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4173                  rscratch1, stlrb);
4174   %}
4175 
4176   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4177     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4178                  rscratch1, stlrh);
4179   %}
4180 
4181   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4182     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4183                  rscratch1, stlrw);
4184   %}
4185 
4186 
  // Volatile (ordered) loads.  Each expands MOV_VOLATILE with a
  // load-acquire mnemonic (ldarb/ldarh/ldarw/ldar).  The signed variants
  // perform the acquiring load and then sign-extend in the destination.

  // load-acquire byte, sign-extended to int
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte, sign-extended to long
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended, int destination
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extended, long destination
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, sign-extended to int
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, sign-extended to long
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended, int destination
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extended, long destination
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire word, int destination
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire word, long destination (zero-extended by ldarw)
  // NOTE(review): this enc_class reuses the name aarch64_enc_ldarw
  // declared just above for an iRegI destination; ADLC appears to
  // tolerate the duplicate since both bodies emit identical code --
  // confirm before relying on which definition is selected.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
4249 
  // Volatile float load: load-acquire the 32-bit pattern into rscratch1,
  // then move it to the FP destination register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: load-acquire the 64-bit pattern into rscratch1,
  // then move it to the FP destination register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4261 
  // Volatile long store via stlr (store-release doubleword).
  // Special case: if the source register is sp (r31_sp) we cannot use it
  // directly as a store operand, so copy sp into rscratch2 first; this
  // pattern only arises when storing the stack pointer into the current
  // thread (asserted below).
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4275 
  // Volatile float store: move the FP bit pattern into rscratch2, then
  // store-release it as a word.  The inner scope limits the lifetime of
  // the local MacroAssembler used for the fmov.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: move the FP bit pattern into rscratch2, then
  // store-release it as a doubleword.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4295 
4296   // synchronized read/update encodings
4297 
  // Load-acquire-exclusive of a long.  ldaxr only accepts a plain base
  // register, so any displacement and/or scaled index is first folded
  // into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + displacement: form the address in rscratch1
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale), formed in two steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4326 
  // Store-release-exclusive of a long.  As with ldaxr, stlxr needs a
  // plain base register, so complex addresses are folded into rscratch2.
  // rscratch1 receives the exclusive-store status (0 on success); the
  // trailing cmpw sets EQ on success for consumers of the flags.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + displacement: form the address in rscratch2
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + (index << scale), formed in two steps
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // stlxr status is 0 on success; make the flags reflect that (EQ == ok)
    __ cmpw(rscratch1, zr);
  %}
4356 
  // Compare-and-swap encodings for 64-bit, 32-bit, 16-bit and 8-bit
  // operands.  All four require a bare base address (no index, no
  // displacement -- guaranteed below) and use release-only ordering
  // (acquire=false, release=true); the _acq variants further down add
  // acquire semantics.  The result is left in the flags by
  // MacroAssembler::cmpxchg (no result register: noreg).

  // CAS of a 64-bit value
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS of a 32-bit value
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS of a 16-bit value
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS of an 8-bit value
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4388 
4389 
4390   // The only difference between aarch64_enc_cmpxchg and
4391   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4392   // CompareAndSwap sequence to serve as a barrier on acquiring a
4393   // lock.
  // Acquiring CAS of a 64-bit value (acquire=true, release=true);
  // otherwise identical to aarch64_enc_cmpxchg above.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // Acquiring CAS of a 32-bit value.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4409 
4410 
4411   // auxiliary used for CompareAndSwapX to set result register
  // Set the result register to 1 if the flags say EQ, else 0 -- used to
  // turn the flag result of a CAS into a boolean.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
4417 
4418   // prefetch encodings
4419 
  // Prefetch for write (PSTL1KEEP: prefetch for store, L1, temporal).
  // Unlike the exclusive-access encodings above, prfm accepts a
  // base+displacement or base+scaled-index address directly; only the
  // combined disp+index form needs an lea through rscratch1.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4438 
4439   /// mov envcodings
4440 
  // Load a 32-bit immediate into an int register.  Zero is special-cased
  // to a move from the zero register.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate into a long register; zero special-cased as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
4462 
  // Load a pointer constant.  NULL and 1 are handled by the dedicated
  // mov_p0/mov_p1 encodings below, so they are unreachable here.  The
  // relocation type selects between an oop, metadata, or a plain
  // address; plain addresses below the page size are moved directly,
  // larger ones are materialized with adrp+add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4487 
  // Load the null pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Load the polling page address with adrp and a poll_type relocation.
  // The page is page-aligned, so the low-bits offset must come back 0.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base (delegated to the macro assembler).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Load a narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Load the narrow-oop null constant.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load a narrow (compressed) klass constant; must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4545 
4546   // arithmetic encodings
4547 
  // 32-bit add/subtract with immediate.  The instruct's $primary field
  // distinguishes add (0) from subtract (1); a negative effective
  // constant is emitted as the opposite operation on its negation so the
  // immediate is always non-negative.
  // NOTE(review): con == INT_MIN would survive both negations unchanged;
  // presumably the immIAddSub operand range excludes it -- confirm
  // against the operand definition.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract with immediate; same scheme as above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4575 
  // Integer division/modulus.  corrected_idivl/corrected_idivq (in the
  // macro assembler) handle the Java corner cases the hardware sdiv does
  // not; the boolean argument selects remainder (true, mod) versus
  // quotient (false, div) -- matching the enc names below.

  // 32-bit quotient
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit quotient
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4607 
4608   // compare instruction encodings
4609 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: emitted as a
  // flag-setting subtract (or add of the negation) against zr.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit immediate.  Long.MIN_VALUE is the one
  // value whose negation is itself (val != -val fails), so it is loaded
  // into rscratch1 via orr and compared register-register.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4691 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-comparison conditional branch; same emission, the cmpOpU
  // operand supplies the unsigned condition code.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
4709 
  // Partial subtype check: delegates to the macro assembler's slow path.
  // On a miss, control falls through to the `miss` label; when $primary
  // is set the result register is additionally zeroed before the label
  // is bound (selecting between the two instruct variants that share
  // this encoding).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4727 
  // Static Java call.  Runtime stubs (no _method) get a plain runtime
  // trampoline call; real Java targets get an opt-virtual or static call
  // relocation plus a to-interpreter stub.  Both stub emission and the
  // trampoline can fail when the code cache is full, in which case the
  // compile is bailed out via record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Dynamic (inline-cache) Java call; bails out if the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4764 
  // Post-call epilogue.  The VerifyStackAtCalls check is not implemented
  // on AArch64 -- it traps via call_Unimplemented if the flag is set.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4772 
  // Call from compiled Java code to the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Arbitrary (native) entry point: compute the return address, push
      // it (paired with zr) so the stack walker can find the last Java
      // pc, then branch-with-link through rscratch1 and pop afterwards.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4803 
  // Jump to the rethrow stub (far_jump: reachable anywhere in memory).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Return to caller via the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target register.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump (exception forwarding): pass the popped return address to
  // the callee in r3, then jump.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4829 
  // Fast-path monitor enter.  Attempts, in order: biased locking (if
  // enabled), a thin-lock CAS of the mark word, the recursive stack-lock
  // case, and finally a CAS on an inflated monitor's owner field.  On
  // exit the flags encode the result (EQ = success, NE = fall back to
  // the runtime); the statement order and flag effects are load-bearing,
  // so this block is documented rather than restructured.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this compare forces NE (runtime path).
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE: single compare-and-swap instruction.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC loop with optional prefetch-for-store hint.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4984 
4985   // TODO
4986   // reimplement this with custom cmpxchgptr code
4987   // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit emitted by C2.  On completion the condition
    // flags report the outcome: EQ = lock released, NE = the caller must
    // fall back to the runtime (slow-path) unlock.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);      // object being unlocked
    Register box = as_Register($box$$reg);         // on-stack BasicLock
    Register disp_hdr = as_Register($tmp$$reg);    // displaced header loaded from the box
    Register tmp = as_Register($tmp2$$reg);        // scratch; later holds the mark word
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      // NOTE(review): presumably revokes the bias and branches to cont when
      // the lock was biased -- confirm in MacroAssembler::biased_locking_exit.
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // Load the current mark word; it is needed at object_has_monitor
      // below to locate the ObjectMonitor.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      // The monitor bit is tested on the box's displaced header, not on the
      // mark word just loaded.  NOTE(review): this relies on the locking code
      // storing a value with the monitor bit set into the box when the lock
      // is inflated -- confirm against aarch64_enc_fast_lock.
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // Single CAS: restore the displaced header iff the mark word still
        // contains the box address; tmp == box afterwards iff it succeeded.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // Load-exclusive/store-exclusive retry loop for pre-LSE hardware.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Reached with flags == NE when the CAS compare failed.
    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // Light-weight path is finished (flags already set); skip monitor code.
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);
      // We are the owner with no recursions: release only if both EntryList
      // and cxq are empty, i.e. no thread is waiting to enter the monitor.
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5083 
5084 %}
5085 
5086 //----------FRAME--------------------------------------------------------------
5087 // Definition of frame structure and management information.
5088 //
5089 //  S T A C K   L A Y O U T    Allocators stack-slot number
5090 //                             |   (to get allocators register number
5091 //  G  Owned by    |        |  v    add OptoReg::stack0())
5092 //  r   CALLER     |        |
5093 //  o     |        +--------+      pad to even-align allocators stack-slot
5094 //  w     V        |  pad0  |        numbers; owned by CALLER
5095 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5096 //  h     ^        |   in   |  5
5097 //        |        |  args  |  4   Holes in incoming args owned by SELF
5098 //  |     |        |        |  3
5099 //  |     |        +--------+
5100 //  V     |        | old out|      Empty on Intel, window on Sparc
5101 //        |    old |preserve|      Must be even aligned.
5102 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5103 //        |        |   in   |  3   area for Intel ret address
5104 //     Owned by    |preserve|      Empty on Sparc.
5105 //       SELF      +--------+
5106 //        |        |  pad2  |  2   pad to align old SP
5107 //        |        +--------+  1
5108 //        |        | locks  |  0
5109 //        |        +--------+----> OptoReg::stack0(), even aligned
5110 //        |        |  pad1  | 11   pad to align new SP
5111 //        |        +--------+
5112 //        |        |        | 10
5113 //        |        | spills |  9   spills
5114 //        V        |        |  8   (pad0 slot for callee)
5115 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5116 //        ^        |  out   |  7
5117 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5118 //     Owned by    +--------+
5119 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5120 //        |    new |preserve|      Must be even-aligned.
5121 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5122 //        |        |        |
5123 //
5124 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5125 //         known from SELF's arguments and the Java calling convention.
5126 //         Region 6-7 is determined per call site.
5127 // Note 2: If the calling convention leaves holes in the incoming argument
5128 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5130 //         incoming area, as the Java calling convention is completely under
5131 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5133 //         varargs C calling conventions.
5134 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5135 //         even aligned with pad0 as needed.
5136 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5137 //           (the latter is true on Intel but is it false on AArch64?)
5138 //         region 6-11 is even aligned; it may be padded out more so that
5139 //         the region from SP to FP meets the minimum stack alignment.
5140 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5141 //         alignment.  Region 11, pad1, may be dynamically extended so that
5142 //         SP meets the minimum alignment.
5143 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal register type:
    // integer/pointer values return in R0, floating point in V0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad marks 32-bit values with no
    // high half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5247 
//----------ATTRIBUTES---------------------------------------------------------
// Attributes the ADLC requires on every operand and instruction; the values
// given here act as the defaults.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5265 
5266 //----------OPERANDS-----------------------------------------------------------
5267 // Operand definitions must precede instruction definitions for correct parsing
5268 // in the ADLC because operands constitute user defined types which are used in
5269 // instruction definitions.
5270 
5271 //----------Simple Operands----------------------------------------------------
5272 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5327 
// 32 bit integer constant no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5447 
// 64 bit integer constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of the form 2^k - 1 whose top two bits are clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of the form 2^k - 1 whose top two bits are clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5499 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- 64 bit constant variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5553 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte (shift = 2) scaled or unscaled access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte (shift = 3) scaled or unscaled access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte (shift = 4) scaled or unscaled access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte (shift = 2) access, 64 bit constant
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte (shift = 3) access, 64 bit constant
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte (shift = 4) access, 64 bit constant
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5656 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (the byte offset of last_Java_pc within the JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5743 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// (matches only the address of the current safepoint polling page)
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5825 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant encodable as an FP immediate
// (widened to double before the validity check)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5917 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6006 
// Fixed-register pointer operands: each pins allocation to a single
// general-purpose register, as required by some encodings and runtime calls.

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6078 
// Fixed-register long operands.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6133 
// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6178 
6179 
// Narrow Pointer Register Operands
// Narrow Pointer Register (compressed oop, 32 bit)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer (32 bit) Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6239 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector operand (D-sized SIMD register)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector operand (Q-sized SIMD register)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6283 
// Fixed-register double operands pinned to v0..v3.
// Presumably used where a stub/runtime calling convention requires a
// specific FP register — TODO confirm against users of these operands.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6319 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6359 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (stale comment previously said link_reg)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6401 
//----------Memory Operands----------------------------------------------------

// [reg] -- base register only, no index, no displacement.
// Throughout these memory operands index(0xffffffff) is the ADLC
// sentinel for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
6417 
// [reg, ireg sxtw scale] -- base + sign-extended 32-bit index, scaled.
// The predicate checks that every memory user of this AddP can encode
// the scaled-index form (scale must match the access size).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, lreg lsl scale] -- base + 64-bit index, scaled.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6447 
// [reg, ireg sxtw] -- base + sign-extended 32-bit index, unscaled.
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, lreg] -- base + 64-bit index, unscaled.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6475 
// [reg, #off] -- base + immediate displacement.
// The numeric suffix (4/8/16) selects an immediate-offset operand whose
// range is valid for that access size; the un-suffixed forms take the
// generic immIOffset / immLoffset ranges.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Long-typed displacement variants of the above.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6587 
// Narrow-oop (DecodeN) base forms of the memory operands above. All are
// guarded by narrow_oop_shift() == 0, i.e. they are only valid when a
// compressed oop decodes without shifting so the narrow value can be
// used directly as a base address.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6692 
6693 
6694 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread, #pc_off] -- thread register base plus the fixed pc-slot offset.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6709 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All stack slots use the SP encoding (0x1e) as base and the slot's
// stack offset as displacement; op_cost reflects the memory round trip.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6784 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  // The hex values are the AArch64 condition-code encodings
  // (EQ=0x0, NE=0x1, LT=0xb, GE=0xa, LE=0xd, GT=0xc, VS=0x6, VC=0x7).
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // Unsigned condition-code encodings: LO=0x3, HS=0x2, LS=0x9, HI=0x8.
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6840 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  // only eq/ne tests qualify for the compare-and-branch encodings
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6864 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  // only lt/ge tests qualify (sign-bit test via tbz/tbnz)
  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6889 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  // eq/ne/lt/ge tests qualify for the compare-and-branch conversions
  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6916 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  // matches a ConvL2I of a long register: the 32-bit consumer simply
  // reads the low 32 bits of $reg, so no explicit l2i (movw) is needed
  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // terminate with ';' for consistency with every other REG_INTER
  // operand in this file
  interface(REG_INTER);
%}
6929 
// Vector memory operand classes: the numeric suffix is the access size in
// bytes, selecting the matching size-restricted immediate-offset operands.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6933 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6961 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Alias the A53-style stage names onto the generic S0..S5 stages used
// by the pipe_class definitions below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6971 
6972 // Integer ALU reg operation
6973 pipeline %{
6974 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions (comment truncated upstream: "TODO does")
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6987 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 = either issue slot; classes that use INS0 alone can only
// issue in slot 0. ALU = either ALU.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
7002 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
7008 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP two-source arithmetic: sources read in S1/S2, result in S5;
// can issue in either slot (INS01).
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP single-source ops (neg, abs, sqrt-style unary forms)
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP precision conversions
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7068 
// FP <-> integer register conversions; all read src in S1 and write
// the result in S5.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7140 
// FP divide: note INS0 (slot 0 only), unlike the INS01 classes above.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
7160 
// FP conditional select: reads the flags and both sources in S1,
// result ready earlier (S3) than the arithmetic classes.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate moves: no source operands.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant loads: result a stage later (S4) than the imm moves.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
7214 
// Vector multiply: 64-bit form may use either issue slot, 128-bit form
// is restricted to slot 0 (INS0).
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate: dst is also read (accumulator input).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7256 
// Vector integer two-source ops: result in S4.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical ops: result a stage earlier (S3) than vdop.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shifts by a register-held shift amount.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shifts by an immediate (immediate contributes no pipeline read).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7334 
// Vector FP two-source ops: result in S5.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide: slot 0 only for both widths.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root (128-bit only).
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary ops.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7401 
// Vector duplicate from general register, 64-bit
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from general register, 128-bit
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from float register, 64-bit
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from float register, 128-bit
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from double register, 128-bit
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move immediate, 64-bit
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move immediate, 128-bit; can only dual issue as instruction 0
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7462 
// Vector load from memory, 64-bit
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load from memory, 128-bit
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7480 
// Vector store to memory, 64-bit
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7489 
// Vector store to memory, 128-bit.
// The source operand was previously declared vecD (copy-paste from the
// 64-bit class); it is vecX here to match the 128-bit (vmem16) store
// rules that reference this pipe class. Pipe-class parameters only bind
// names for the stage lines below, so this does not change scheduling.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7498 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): comment above says result generated in EX2 and dst is
  // written at EX2, yet the ALU resource is booked at EX1 — confirm
  // against the pipeline model before changing.
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7596 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7623 
//------- Conditional instructions ------------------------

// Conditional no operands (reads flags only)
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSINC   X0, X1, X1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7661 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7740 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: 'dst' here is the index register of the address; it is read at
// ISS, not written.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7808 
//------- Branch pipeline operations ----------------------
// (header previously duplicated "Store pipeline operations")

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads flags)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7837 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
// (used by the volatile load/store rules further down)
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7901 
7902 %}
7903 //----------INSTRUCTIONS-------------------------------------------------------
7904 //
7905 // match      -- States which machine-independent subtree may be replaced
7906 //               by this instruction.
7907 // ins_cost   -- The estimated cost of this instruction is used by instruction
7908 //               selection to identify a minimum cost tree of machine
7909 //               instructions that matches a tree of machine-independent
7910 //               instructions.
7911 // format     -- A string providing the disassembly for this instruction.
7912 //               The value of an instruction's operand may be inserted
7913 //               by referring to it with a '$' prefix.
7914 // opcode     -- Three instruction opcodes may be provided.  These are referred
7915 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7917 //               indicate the type of machine instruction, while secondary
7918 //               and tertiary are often used for prefix options or addressing
7919 //               modes.
7920 // ins_encode -- A list of encode classes with parameters. The encode class
7921 //               name must have been defined in an 'enc_class' specification
7922 //               in the encode section of the architecture description.
7923 
7924 // ============================================================================
7925 // Memory (Load/Store) Instructions
7926 
// Load Instructions

// The plain load rules below carry a !needs_acquiring_load predicate:
// loads requiring acquire semantics are matched instead by the ldar*
// rules in the volatile section further down.

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // the load node is n->in(1) underneath the ConvI2L
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (AndL (ConvI2L (LoadI)) 0xffffffff) idiom as a single
// zero-extending 32-bit load.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): disassembly comment says "# int" for a 64-bit load;
  // cosmetic only.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8096 
// Load Range (array length; never volatile, hence no predicate)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// FP loads are scheduled via pipe_class_memory rather than iload_reg_mem.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8193 
8194 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// May expand to a multi-instruction mov sequence, hence the higher cost.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8250 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Disassembly note previously said "# NULL ptr" (copy-paste from
  // loadConP0); this materializes the constant-one pointer.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8264 
// Load Poll Page Constant
// (adr of the polling page — presumably used for safepoint polls;
//  semantics live in aarch64_enc_mov_poll_page)

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8334 
// Load Packed Float Constant
// "Packed" constants can be materialized directly with an fmov
// immediate, avoiding a constant-table load.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case: load from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8378 
// Load Double Constant (general case: load from the constant table)

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Disassembly note previously said "float=$con" (copy-paste from
  // loadConF); this is a double constant.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8395 
// Store Instructions

// The plain store rules carry a !needs_releasing_store predicate:
// stores requiring release semantics are matched by separate rules.

// Store CMS card-mark Immediate
// (storestore barrier elided when unnecessary_storestore(n) holds)
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8442 
8443 
// Store Byte zero
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // aarch64_enc_strb0 stores the zero register (same encoding printed
  // as "strb zr" by storeimmCM0); the format previously named a
  // misspelled scratch register ("rscractch2").
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8456 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): disassembly comment says "# int" for a 64-bit store;
  // cosmetic only.
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed) zero
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8539 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer zero
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer zero
// When the narrow oop/klass bases are both NULL, rheapbase holds zero,
// so it can be stored directly instead of materializing a zero.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8596 
8597 // Store Float
8598 instruct storeF(vRegF src, memory mem)
8599 %{
8600   match(Set mem (StoreF mem src));
8601   predicate(!needs_releasing_store(n));
8602 
8603   ins_cost(INSN_COST);
8604   format %{ "strs  $src, $mem\t# float" %}
8605 
8606   ins_encode( aarch64_enc_strs(src, mem) );
8607 
8608   ins_pipe(pipe_class_memory);
8609 %}
8610 
8611 // TODO
8612 // implement storeImmF0 and storeFImmPacked
8613 
8614 // Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  // Plain (non-releasing) 64-bit FP store.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8627 
8628 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // Plain (non-releasing) store of a 32-bit compressed klass pointer.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8641 
8642 // TODO
8643 // implement storeImmD0 and storeDImmPacked
8644 
8645 // prefetch instructions
8646 // Must be safe to execute with invalid address (cannot fault).
8647 
// Prefetch for an upcoming allocation write; PRFM never faults, so an
// invalid address is safe here.
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8658 
8659 //  ---------------- volatile loads and stores ----------------
8660 
8661 // Load Byte (8 bit signed)
// Volatile variant: load-acquire (ldarsb) instead of a plain load.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8673 
8674 // Load Byte (8 bit signed) into long
// Fused volatile byte load + int->long conversion (ldarsb sign-extends
// to the full 64-bit register).
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8686 
8687 // Load Byte (8 bit unsigned)
// Volatile variant: load-acquire (ldarb) zero-extends the byte.
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8699 
8700 // Load Byte (8 bit unsigned) into long
// Fused volatile unsigned-byte load + int->long conversion (ldarb
// zero-extends, so the long value is produced directly).
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8712 
8713 // Load Short (16 bit signed)
// Volatile variant: load-acquire halfword, sign-extended (ldarshw).
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
8725 
// Load Short/Char (16 bit unsigned)
// Volatile variant: load-acquire halfword, zero-extended (ldarhw).
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
8737 
8738 // Load Short/Char (16 bit unsigned) into long
// Fused volatile unsigned-halfword load + int->long conversion (ldarh
// zero-extends, so the long value is produced directly).
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8750 
8751 // Load Short/Char (16 bit signed) into long
// Fused volatile signed-halfword load + int->long conversion (ldarsh
// sign-extends to the full 64-bit register).
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // The format must name the emitted instruction: the encoding uses the
  // sign-extending ldarsh, not the zero-extending ldarh.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8763 
8764 // Load Integer (32 bit signed)
// Volatile variant: 32-bit load-acquire (ldarw).
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8776 
8777 // Load Integer (32 bit unsigned) into long
// Matches (LoadI -> ConvI2L -> AndL 0xFFFFFFFF); ldarw zero-extends the
// 32-bit value so the mask is subsumed by the single load-acquire.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8789 
8790 // Load Long (64 bit signed)
// Volatile variant: 64-bit load-acquire (ldar).
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Format comment fixed: this is a 64-bit long load, not an int load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8802 
8803 // Load Pointer
// Volatile variant: 64-bit pointer load-acquire (ldar).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8815 
8816 // Load Compressed Pointer
// Volatile variant: 32-bit compressed-oop load-acquire (ldarw).
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8828 
8829 // Load Float
// Volatile variant: float load with acquire semantics (via the
// aarch64_enc_fldars encoding).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
8841 
8842 // Load Double
// Volatile variant: double load with acquire semantics (via the
// aarch64_enc_fldard encoding).
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8854 
8855 // Store Byte
// Volatile variant: byte store-release (stlrb).
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
8867 
8868 // Store Char/Short
// Volatile variant: halfword store-release (stlrh).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8880 
8881 // Store Integer
8882 
// Volatile variant: 32-bit store-release (stlrw).
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Spacing normalized to match every sibling rule ("Set mem (StoreX ...)").
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8894 
8895 // Store Long (64 bit signed)
// Volatile variant: 64-bit store-release (stlr).
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Format comment fixed: this is a 64-bit long store, not an int store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8907 
8908 // Store Pointer
// Volatile variant: 64-bit pointer store-release (stlr).
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8920 
8921 // Store Compressed Pointer
// Volatile variant: 32-bit compressed-oop store-release (stlrw).
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8933 
8934 // Store Float
// Volatile variant: float store with release semantics (via the
// aarch64_enc_fstlrs encoding).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8946 
8947 // TODO
8948 // implement storeImmF0 and storeFImmPacked
8949 
8950 // Store Double
// Volatile variant: double store with release semantics (via the
// aarch64_enc_fstlrd encoding).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8962 
8963 //  ---------------- end of volatile loads and stores ----------------
8964 
// Cache-line writeback for a single address; the address must be a plain
// base register (no index, zero displacement) as the asserts enforce.
instruct cacheWB(indirect addr)
%{
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}
8978 
// Ordering barrier emitted before a sequence of cache writebacks.
instruct cacheWBPreSync()
%{
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync();
  %}
  ins_pipe(pipe_slow); // XXX
%}
8990 
// Ordering barrier emitted after a sequence of cache writebacks; uses the
// same cache_wbsync() helper as the pre-sync variant.
instruct cacheWBPostSync()
%{
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync();
  %}
  ins_pipe(pipe_slow); // XXX
%}
9002 
9003 // ============================================================================
9004 // BSWAP Instructions
9005 
// Byte-swap a 32-bit value with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9018 
// Byte-swap a 64-bit value with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9031 
// Byte-swap a 16-bit unsigned value (char); rev16w swaps bytes within
// each halfword, no extension needed for the unsigned case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9044 
// Byte-swap a 16-bit signed value: rev16w swaps the bytes, then sbfmw
// sign-extends bits 0..15 into the full 32-bit result.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
9059 
9060 // ============================================================================
9061 // Zero Count Instructions
9062 
// Integer.numberOfLeadingZeros: single clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9074 
// Long.numberOfLeadingZeros: single clz instruction (result is an int).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9086 
// Integer.numberOfTrailingZeros: AArch64 has no ctz, so reverse the bits
// (rbitw) and count leading zeros (clzw).
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9100 
// Long.numberOfTrailingZeros: rbit then clz (same trick as the int form,
// on the 64-bit registers).
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9114 
9115 //---------- Population Count Instructions -------------------------------------
9116 //
9117 
// Integer.bitCount via the SIMD unit: move to a vector register, cnt
// counts bits per byte, addv sums the eight byte counts, move back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back to $src (movw zero-extends, clearing
    // the top 32 bits); the low 32 bits are unchanged, but $src is not
    // declared TEMP/USE_KILL — confirm this is intentional.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9139 
// Integer.bitCount of a value loaded from memory: load the 32 bits
// straight into the vector register (ldrs), then cnt/addv as above.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9161 
9162 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Same SIMD cnt/addv sequence as popCountI but on the full 64 bits, so
// no zero-extension step is needed.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9182 
// Long.bitCount of a value loaded from memory: load 64 bits straight
// into the vector register (ldrd), then cnt/addv as above.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9204 
9205 // ============================================================================
9206 // MemBar Instruction
9207 
// LoadFence: orders prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
9219 
// Elide the acquire barrier when the predicate proves a preceding
// load-acquire already provides the ordering; emits only a comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
9233 
// Fallback acquire barrier: LoadLoad|LoadStore membar (dmb ish ld class).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
9248 
9249 
// Always elided: the lock-acquire path already establishes the required
// ordering, so only a block comment is emitted.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9262 
// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9274 
// Elide the release barrier when the predicate proves a following
// store-release already provides the ordering; emits only a comment.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
9287 
// Fallback release barrier: LoadStore|StoreStore membar.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9301 
// StoreStore-only barrier.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9313 
// Always elided: the lock-release path already establishes the required
// ordering, so only a block comment is emitted.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9326 
// Elide the full (StoreLoad) barrier when the predicate proves the
// surrounding acquire/release instructions make it redundant.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9340 
// Fallback full barrier: StoreLoad membar. High cost (x100) biases the
// matcher toward the elided variant whenever its predicate holds.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9355 
9356 // ============================================================================
9357 // Cast/Convert Instructions
9358 
// Reinterpret a long as a pointer; a register-to-register move, skipped
// entirely when the allocator assigns the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9373 
// Reinterpret a pointer as a long; a register-to-register move, skipped
// entirely when the allocator assigns the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9388 
9389 // Convert oop into int for vectors alignment masking
// Truncate a pointer to its low 32 bits (movw zero-extends into dst).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9401 
9402 // Convert compressed oop into int for vectors alignment masking
9403 // in case of 32bit oops (heap < 4Gb).
// With a zero shift the compressed form already equals the low 32 bits of
// the decoded pointer, so a 32-bit move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Format fixed: previously printed the literal text "dst" (missing '$')
  // and named "mov" although the encoding emits movw.
  format %{ "movw  $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9417 
9418 
9419 // Convert oop pointer into compressed form
// Compress a possibly-null oop; the general encode_heap_oop helper may
// clobber flags, hence KILL cr (unlike the not_null variant below).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9433 
// Compress an oop statically known to be non-null (no null check, no
// flag effect declared).
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
9444 
// Decompress a possibly-null oop (general path with null handling).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9458 
// Decompress an oop statically known to be non-null or constant.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9472 
9473 // n.b. AArch64 implementations of encode_klass_not_null and
9474 // decode_klass_not_null do not modify the flags register so, unlike
9475 // Intel, we don't kill CR as a side effect here
9476 
// Compress a klass pointer (always non-null); per the note above, the
// AArch64 helper does not touch flags, so no KILL cr effect.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
9491 
// Decompress a klass pointer; uses the in-place single-register helper
// when dst and src were allocated to the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9510 
// Type-system-only cast: emits no code (size 0, empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9520 
// Type-system-only pointer cast: emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9530 
// Type-system-only int cast: emits no code and costs nothing.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9541 
9542 // ============================================================================
9543 // Atomic operation instructions
9544 //
9545 // Intel and SPARC both implement Ideal Node LoadPLocked and
9546 // Store{PIL}Conditional instructions using a normal load for the
9547 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9548 //
9549 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9550 // pair to lock object allocations from Eden space when not using
9551 // TLABs.
9552 //
9553 // There does not appear to be a Load{IL}Locked Ideal Node and the
9554 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9555 // and to use StoreIConditional only for 32-bit and StoreLConditional
9556 // only for 64-bit.
9557 //
9558 // We implement LoadPLocked and StorePLocked instructions using,
9559 // respectively the AArch64 hw load-exclusive and store-conditional
9560 // instructions. Whereas we must implement each of
9561 // Store{IL}Conditional using a CAS which employs a pair of
9562 // instructions comprising a load-exclusive followed by a
9563 // store-conditional.
9564 
9565 
9566 // Locked-load (linked load) of the current heap-top
9567 // used when updating the eden heap top
9568 // implemented using ldaxr on AArch64
9569 
// Load-linked of the heap top (eden allocation): ldaxr establishes the
// exclusive monitor that the matching storePConditional checks.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9582 
9583 // Conditional-store of the updated heap-top.
9584 // Used during allocation of the shared heap.
9585 // Sets flag (EQ) on success.
9586 // implemented using stlxr on AArch64.
9587 
// Store-conditional paired with loadPLocked above: stlxr succeeds only if
// the exclusive monitor from the ldaxr is still held; EQ is set on success.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // Note: oldval is unused by the encoding; the exclusive monitor, not a
  // compare, decides success.
  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9607 
9608 
9609 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9610 // when attempting to rebias a lock towards the current thread.  We
9611 // must use the acquire form of cmpxchg in order to guarantee acquire
9612 // semantics in this case.
// 64-bit conditional store implemented as an acquiring CAS (see the
// rebias rationale in the comment above); EQ set on success.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9628 
9629 // storeIConditional also has acquire semantics, for no better reason
9630 // than matching storeLConditional.  At the time of writing this
9631 // comment storeIConditional was not used anywhere by AArch64.
// 32-bit conditional store as an acquiring CAS, mirroring
// storeLConditional (see comment above); EQ set on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9647 
9648 // standard CompareAndSwapX when we are using barriers
9649 // these have higher priority than the rules selected by a predicate
9650 
9651 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9652 // can't match them
9653 
// Byte CAS: cmpxchgb sets EQ on success, cset materializes the boolean
// result (1 = swapped). Flags are clobbered, hence KILL cr.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9671 
// Short CAS: same shape as compareAndSwapB with the halfword encoding.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9689 
// 32-bit int CAS: cmpxchgw then cset on EQ.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9707 
9708 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
9709 
9710   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
9711   ins_cost(2 * VOLATILE_REF_COST);
9712 
9713   effect(KILL cr);
9714 
9715  format %{
9716     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
9717     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9718  %}
9719 
9720  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
9721             aarch64_enc_cset_eq(res));
9722 
9723   ins_pipe(pipe_slow);
9724 %}
9725 
9726 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
9727 
9728   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
9729   ins_cost(2 * VOLATILE_REF_COST);
9730 
9731   effect(KILL cr);
9732 
9733  format %{
9734     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
9735     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9736  %}
9737 
9738  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
9739             aarch64_enc_cset_eq(res));
9740 
9741   ins_pipe(pipe_slow);
9742 %}
9743 
9744 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
9745 
9746   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
9747   ins_cost(2 * VOLATILE_REF_COST);
9748 
9749   effect(KILL cr);
9750 
9751  format %{
9752     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
9753     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9754  %}
9755 
9756  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
9757             aarch64_enc_cset_eq(res));
9758 
9759   ins_pipe(pipe_slow);
9760 %}
9761 
9762 // alternative CompareAndSwapX when we are eliding barriers
9763 
// ---- Acquiring CAS variants ----
// Selected instead of the rules above when needs_acquiring_load_exclusive(n)
// holds (the CAS replaces a volatile access whose barriers C2 elided).  The
// *_acq encodings are used and the cost drops to VOLATILE_REF_COST so these
// rules win over the standard forms.

// Int CAS with acquire semantics.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Long CAS with acquire semantics.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS with acquire semantics.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS with acquire semantics.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9839 
9840 
9841 // ---------------------------------------------------------------------
9842 
9843 
9844 // BEGIN This section of the file is automatically generated. Do not edit --------------
9845 
9846 // Sundry CAS operations.  Note that release is always true,
9847 // regardless of the memory ordering of the CAS.  This is because we
9848 // need the volatile case to be sequentially consistent but there is
9849 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9850 // can't check the type of memory ordering here, so we always emit a
9851 // STLXR.
9852 
9853 // This section is generated from aarch64_ad_cas.m4
9854 
9855 
9856 
// Strong compare-and-exchange of a byte: returns the value found in memory
// (sign-extended) in $res rather than a success flag.
// Fixed: format said "(byte, weak)" but the encoding passes /*weak*/ false --
// this is the strong form.  NOTE(review): this section is generated from
// aarch64_ad_cas.m4; mirror this fix there before regenerating.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // Zero-extend the expected value: the exclusive load compares 8 bits.
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the fetched byte to match Java byte semantics.
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9873 
// Strong compare-and-exchange of a short: returns the value found in memory
// (sign-extended) in $res rather than a success flag.
// Fixed: format said "(short, weak)" but the encoding passes /*weak*/ false.
// NOTE(review): generated from aarch64_ad_cas.m4; mirror this fix there.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // Zero-extend the expected value: the exclusive load compares 16 bits.
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the fetched halfword to match Java short semantics.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9890 
// Strong compare-and-exchange of an int: returns the value found in memory.
// Fixed: format said "(int, weak)" but the encoding passes /*weak*/ false.
// NOTE(review): generated from aarch64_ad_cas.m4; mirror this fix there.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9905 
// Strong compare-and-exchange of a long: returns the value found in memory.
// Fixed: format said "(long, weak)" but the encoding passes /*weak*/ false.
// NOTE(review): generated from aarch64_ad_cas.m4; mirror this fix there.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9920 
// Strong compare-and-exchange of a narrow oop: returns the value found in
// memory.  Fixed: format said "(narrow oop, weak)" but the encoding passes
// /*weak*/ false.  NOTE(review): generated from aarch64_ad_cas.m4; mirror
// this fix there.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9935 
// Strong compare-and-exchange of a pointer: returns the value found in
// memory.  Fixed: format said "(ptr, weak)" but the encoding passes
// /*weak*/ false.  NOTE(review): generated from aarch64_ad_cas.m4; mirror
// this fix there.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9950 
// ---- Weak CAS rules ----
// These pass /*weak*/ true to MacroAssembler::cmpxchg (single LDXR/STXR
// attempt; presumably may fail spuriously -- see cmpxchg's contract) and
// return a success flag in $res via csetw, not the fetched value.
// Release is still true, per the section comment above.

// Weak byte CAS.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    // Zero-extend the expected value for the 8-bit comparison.
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    // noreg above: the fetched value is discarded; only EQ matters.
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak short CAS.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    // Zero-extend the expected value for the 16-bit comparison.
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak int CAS.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak long CAS; note $res is an int register -- it holds the flag, not the value.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak narrow-oop CAS.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak pointer CAS.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
10054 
10055 // END This section of the file is automatically generated. Do not edit --------------
10056 // ---------------------------------------------------------------------
10057 
// ---- Atomic exchange (GetAndSetX) ----
// $prev receives the previous memory contents; the memory operand must be a
// plain base register (indirect), hence as_Register($mem$$base).

// Int exchange (32-bit).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long exchange (64-bit).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Narrow-oop exchange (32-bit word).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Pointer exchange (64-bit).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10093 
10094 
// ---- Atomic fetch-and-add (GetAndAddX) ----
// Four variants per width: register or immediate increment, each with a
// "_no_res" form (predicate result_not_used()) that passes noreg and is
// priced one INSN_COST cheaper so it wins when the old value is dead.

// Long fetch-and-add, register increment.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, register increment, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment (add/sub-encodable).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10178 
10179 // Manifest a CmpL result in an integer register.
10180 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    // Three-instruction -1/0/1 sequence: csetw NE yields 0 when equal,
    // 1 otherwise; cnegw LT then negates that 1 to -1 when src1 < src2.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10201 
// Manifest a CmpL3 against an add/sub-encodable immediate as -1/0/1 in $dst.
// Fixed: stray extra indentation on the `if (con < 0)` line.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant cannot be encoded in subs, so fold it as an adds
    // of its negation (immLAddSub presumably constrains the range so -con
    // cannot overflow -- verify against the operand definition).
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // Same -1/0/1 trick as cmpL3_reg_reg: NE -> 0/1, negate when LT.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10226 
10227 // ============================================================================
10228 // Conditional Move Instructions
10229 
10230 // n.b. we have identical rules for both a signed compare op (cmpOp)
10231 // and an unsigned compare op (cmpOpU). it would be nice if we could
10232 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
10234 // opclass does not live up to the COND_INTER interface of its
10235 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
10237 // which throws a ShouldNotHappen. So, we have to provide two flavours
10238 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10239 
// Signed int conditional move.  Note the operand order: csel selects $src2
// when the condition holds, else $src1 (i.e. the ideal CMove's second input
// is the "true" value).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (see the cmpOp/cmpOpU note).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Signed int cmov with zero as the "false" value (zr replaces a loaded 0).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Signed int cmov with zero as the "true" value.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10344 
10345 // special case for creating a boolean 0 or 1
10346 
10347 // n.b. this is selected in preference to the rule above because it
10348 // avoids loading constants 0 and 1 into a source register
10349 
// Boolean materialization: csincw dst, zr, zr, cond computes
// cond ? 0 : 0+1, i.e. 1 exactly when the condition does NOT hold --
// matching CMoveI(cond, one, zero) with no constant loads at all.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10387 
// Long conditional moves: same structure as the int rules above but using
// the 64-bit csel.  csel selects $src2 when the condition holds, else $src1.

// Signed long cmov.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Signed long cmov, zero as the "true" value.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Signed long cmov, zero as the "false" value.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10485 
// Pointer conditional moves: identical shape to the long rules (64-bit csel).

// Signed pointer cmov.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Signed pointer cmov, zero (null) as the "true" value.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Signed pointer cmov, zero (null) as the "false" value.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10583 
10584 instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
10585   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
10586 
10587   ins_cost(INSN_COST * 2);
10588   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
10589 
10590   ins_encode %{
10591     __ cselw(as_Register($dst$$reg),
10592              as_Register($src2$$reg),
10593              as_Register($src1$$reg),
10594              (Assembler::Condition)$cmp$$cmpcode);
10595   %}
10596 
10597   ins_pipe(icond_reg_reg);
10598 %}
10599 
// Conditional move of a compressed (narrow) oop after an unsigned compare:
// dst = cmp ? src2 : src1, via the 32-bit cselw.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // This rule matches cmpOpU, so the disassembly comment says "unsigned"
  // (previously it wrongly said "signed", disagreeing with the siblings).
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10615 
// special cases where one arg is zero

// dst = cmp ? 0 : src for compressed oops (signed compare); zero folded into zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cmp ? 0 : src for compressed oops (unsigned compare).
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cmp ? src : 0 for compressed oops (signed compare).
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cmp ? src : 0 for compressed oops (unsigned compare).
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10681 
// Conditional move of a float after a signed compare.
// FCSEL Sd, Sn, Sm, cond gives Sd = cond ? Sn : Sm, so dst = cmp ? src2 : src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Conditional move of a float after an unsigned compare.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10717 
// Conditional move of a double after a signed compare: dst = cmp ? src2 : src1
// via FCSEL on D registers.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Disassembly comment corrected from "cmove float": this is the CMoveD rule.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10735 
// Conditional move of a double after an unsigned compare: dst = cmp ? src2 : src1
// via FCSEL on D registers.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Disassembly comment corrected from "cmove float": this is the CMoveD rule.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10753 
10754 // ============================================================================
10755 // Arithmetic Instructions
10756 //
10757 
10758 // Integer Addition
10759 
10760 // TODO
10761 // these currently employ operations which do not set CR and hence are
10762 // not flagged as killing CR but we would like to isolate the cases
10763 // where we want to set flags from those where we don't. need to work
10764 // out how to do that.
10765 
10766 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10767   match(Set dst (AddI src1 src2));
10768 
10769   ins_cost(INSN_COST);
10770   format %{ "addw  $dst, $src1, $src2" %}
10771 
10772   ins_encode %{
10773     __ addw(as_Register($dst$$reg),
10774             as_Register($src1$$reg),
10775             as_Register($src2$$reg));
10776   %}
10777 
10778   ins_pipe(ialu_reg_reg);
10779 %}
10780 
10781 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
10782   match(Set dst (AddI src1 src2));
10783 
10784   ins_cost(INSN_COST);
10785   format %{ "addw $dst, $src1, $src2" %}
10786 
10787   // use opcode to indicate that this is an add not a sub
10788   opcode(0x0);
10789 
10790   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10791 
10792   ins_pipe(ialu_reg_imm);
10793 %}
10794 
10795 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
10796   match(Set dst (AddI (ConvL2I src1) src2));
10797 
10798   ins_cost(INSN_COST);
10799   format %{ "addw $dst, $src1, $src2" %}
10800 
10801   // use opcode to indicate that this is an add not a sub
10802   opcode(0x0);
10803 
10804   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10805 
10806   ins_pipe(ialu_reg_imm);
10807 %}
10808 
10809 // Pointer Addition
10810 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
10811   match(Set dst (AddP src1 src2));
10812 
10813   ins_cost(INSN_COST);
10814   format %{ "add $dst, $src1, $src2\t# ptr" %}
10815 
10816   ins_encode %{
10817     __ add(as_Register($dst$$reg),
10818            as_Register($src1$$reg),
10819            as_Register($src2$$reg));
10820   %}
10821 
10822   ins_pipe(ialu_reg_reg);
10823 %}
10824 
10825 instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
10826   match(Set dst (AddP src1 (ConvI2L src2)));
10827 
10828   ins_cost(1.9 * INSN_COST);
10829   format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
10830 
10831   ins_encode %{
10832     __ add(as_Register($dst$$reg),
10833            as_Register($src1$$reg),
10834            as_Register($src2$$reg), ext::sxtw);
10835   %}
10836 
10837   ins_pipe(ialu_reg_reg);
10838 %}
10839 
10840 instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
10841   match(Set dst (AddP src1 (LShiftL src2 scale)));
10842 
10843   ins_cost(1.9 * INSN_COST);
10844   format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
10845 
10846   ins_encode %{
10847     __ lea(as_Register($dst$$reg),
10848            Address(as_Register($src1$$reg), as_Register($src2$$reg),
10849                    Address::lsl($scale$$constant)));
10850   %}
10851 
10852   ins_pipe(ialu_reg_reg_shift);
10853 %}
10854 
10855 instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
10856   match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
10857 
10858   ins_cost(1.9 * INSN_COST);
10859   format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
10860 
10861   ins_encode %{
10862     __ lea(as_Register($dst$$reg),
10863            Address(as_Register($src1$$reg), as_Register($src2$$reg),
10864                    Address::sxtw($scale$$constant)));
10865   %}
10866 
10867   ins_pipe(ialu_reg_reg_shift);
10868 %}
10869 
10870 instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
10871   match(Set dst (LShiftL (ConvI2L src) scale));
10872 
10873   ins_cost(INSN_COST);
10874   format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}
10875 
10876   ins_encode %{
10877     __ sbfiz(as_Register($dst$$reg),
10878           as_Register($src$$reg),
10879           $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
10880   %}
10881 
10882   ins_pipe(ialu_reg_shift);
10883 %}
10884 
10885 // Pointer Immediate Addition
10886 // n.b. this needs to be more expensive than using an indirect memory
10887 // operand
10888 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
10889   match(Set dst (AddP src1 src2));
10890 
10891   ins_cost(INSN_COST);
10892   format %{ "add $dst, $src1, $src2\t# ptr" %}
10893 
10894   // use opcode to indicate that this is an add not a sub
10895   opcode(0x0);
10896 
10897   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
10898 
10899   ins_pipe(ialu_reg_imm);
10900 %}
10901 
10902 // Long Addition
10903 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10904 
10905   match(Set dst (AddL src1 src2));
10906 
10907   ins_cost(INSN_COST);
10908   format %{ "add  $dst, $src1, $src2" %}
10909 
10910   ins_encode %{
10911     __ add(as_Register($dst$$reg),
10912            as_Register($src1$$reg),
10913            as_Register($src2$$reg));
10914   %}
10915 
10916   ins_pipe(ialu_reg_reg);
10917 %}
10918 
// No constant pool entries required. Long Immediate Addition.
// 64-bit add of an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10981 
// No constant pool entries required. Long Immediate Subtraction.
// 64-bit subtract of an add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed missing separator: was "sub$dst, ...", which garbled the
  // disassembly comment output.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10996 
// Integer Negation (special case for sub)

// 0 - src matched to the negw alias.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 0 - src matched to the neg alias.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer Multiply

instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// (long)a * (long)b where both operands are ints: one smull instead of
// two extends plus a 64-bit multiply.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the signed 128-bit product (MulHiL), via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11093 
11094 // Combined Integer Multiply & Add/Sub
11095 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2 in one maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now names the 32-bit mnemonic actually emitted (maddw, not madd).
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11111 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2 in one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now names the 32-bit mnemonic actually emitted (msubw, not msub).
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11127 
// Combined Long Multiply & Add/Sub

// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src >> 31) >>> 31 extracts the sign bit; a single logical shift right
// by 31 produces the same value.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + ((src >> 31) >>> 31): the round-toward-zero bias for signed
// division by a power of two, folded into one shifted addw.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit sign-bit extraction: (src >> 63) >>> 63 == src >>> 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11219 
// src + ((src >> 63) >>> 63): the round-toward-zero bias for signed 64-bit
// division by a power of two, folded into one shifted add.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Format now shows the LSR shift actually applied (matches div2Round).
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11233 
11234 // Integer Remainder
11235 
// Integer remainder: quotient into rscratch1, then dst = src1 - q * src2.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Removed stray "(" that garbled the msubw line of the disassembly comment.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11246 
11247 // Long Remainder
11248 
// Long remainder: quotient into rscratch1, then dst = src1 - q * src2.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Removed stray "(" and added the missing \t so the two-line disassembly
  // comment lines up like modI's.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11259 
// Integer Shifts

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    // count masked to 0..31 for the 32-bit shift
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // count masked to 0..63 for the 64-bit shift
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    // CastP2X is a reinterpretation only; the lsr operates on the raw bits
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11474 
11475 // BEGIN This section of the file is automatically generated. Do not edit --------------
11476 
11477 instruct regL_not_reg(iRegLNoSp dst,
11478                          iRegL src1, immL_M1 m1,
11479                          rFlagsReg cr) %{
11480   match(Set dst (XorL src1 m1));
11481   ins_cost(INSN_COST);
11482   format %{ "eon  $dst, $src1, zr" %}
11483 
11484   ins_encode %{
11485     __ eon(as_Register($dst$$reg),
11486               as_Register($src1$$reg),
11487               zr,
11488               Assembler::LSL, 0);
11489   %}
11490 
11491   ins_pipe(ialu_reg);
11492 %}
11493 instruct regI_not_reg(iRegINoSp dst,
11494                          iRegIorL2I src1, immI_M1 m1,
11495                          rFlagsReg cr) %{
11496   match(Set dst (XorI src1 m1));
11497   ins_cost(INSN_COST);
11498   format %{ "eonw  $dst, $src1, zr" %}
11499 
11500   ins_encode %{
11501     __ eonw(as_Register($dst$$reg),
11502               as_Register($src1$$reg),
11503               zr,
11504               Assembler::LSL, 0);
11505   %}
11506 
11507   ins_pipe(ialu_reg);
11508 %}
11509 
11510 instruct AndI_reg_not_reg(iRegINoSp dst,
11511                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11512                          rFlagsReg cr) %{
11513   match(Set dst (AndI src1 (XorI src2 m1)));
11514   ins_cost(INSN_COST);
11515   format %{ "bicw  $dst, $src1, $src2" %}
11516 
11517   ins_encode %{
11518     __ bicw(as_Register($dst$$reg),
11519               as_Register($src1$$reg),
11520               as_Register($src2$$reg),
11521               Assembler::LSL, 0);
11522   %}
11523 
11524   ins_pipe(ialu_reg_reg);
11525 %}
11526 
// Logical instructions where one operand is bitwise-NOTed.  C2 represents
// NOT as an XOR with -1 (the immI_M1/immL_M1 operand), and these rules fold
// that XOR into the AArch64 negated-operand forms BIC/ORN/EON, so the whole
// tree becomes a single instruction.
// NOTE(review): the rFlagsReg cr operand appears in the signature but is not
// referenced by the match rule or the encoding — presumably a matcher
// artifact; confirm before relying on flags being preserved.

// dst = src1 & ~src2 (long)  ==>  bic
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    // LSL #0: second source register is used unshifted.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (int)  ==>  ornw
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (long)  ==>  orn
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Matches -1 ^ (src2 ^ src1) == ~(src1 ^ src2) == src1 ^ ~src2 (int)
// ==>  eonw
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Matches -1 ^ (src2 ^ src1) == ~(src1 ^ src2) == src1 ^ ~src2 (long)
// ==>  eon
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11611 
// Logical instructions with a NOTed *and shifted* second operand.  These
// fold both the xor-with-minus-one (bitwise NOT) and a constant shift of
// src2 into a single BIC/EON/ORN with a shifted-register operand.  The
// shift amount is masked to 5 bits (int, & 0x1f) or 6 bits (long, & 0x3f)
// to match what the instruction encodes.

// dst = src1 & ~(src2 >>> src3) (int)  ==>  bicw, LSR
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3) (long)  ==>  bic, LSR
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (int)  ==>  bicw, ASR
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (long)  ==>  bic, ASR
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (int)  ==>  bicw, LSL
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (long)  ==>  bic, LSL
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Matches -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3) (int)
// ==>  eonw, LSR
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Matches -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3) (long)
// ==>  eon, LSR
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Matches -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3) (int)
// ==>  eonw, ASR
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Matches -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3) (long)
// ==>  eon, ASR
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Matches -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3) (int)
// ==>  eonw, LSL
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Matches -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3) (long)
// ==>  eon, LSL
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3) (int)  ==>  ornw, LSR
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3) (long)  ==>  orn, LSR
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (int)  ==>  ornw, ASR
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (long)  ==>  orn, ASR
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (int)  ==>  ornw, LSL
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (long)  ==>  orn, LSL
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11935 
// Logical instructions (AND/XOR/OR) whose second operand is a register
// shifted by a constant.  The shift is folded into the ALU instruction's
// shifted-register form.  Shift counts are masked to 0x1f (int) or 0x3f
// (long) to match the instruction's encoding range.

// dst = src1 & (src2 >>> src3) (int)  ==>  andw, LSR
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3) (long)  ==>  andr, LSR
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (int)  ==>  andw, ASR
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (long)  ==>  andr, ASR
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (int)  ==>  andw, LSL
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (long)  ==>  andr, LSL
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3) (int)  ==>  eorw, LSR
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3) (long)  ==>  eor, LSR
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (int)  ==>  eorw, ASR
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (long)  ==>  eor, ASR
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (int)  ==>  eorw, LSL
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (long)  ==>  eor, LSL
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3) (int)  ==>  orrw, LSR
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3) (long)  ==>  orr, LSR
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (int)  ==>  orrw, ASR
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (long)  ==>  orr, ASR
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (int)  ==>  orrw, LSL
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (long)  ==>  orr, LSL
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12277 
// Add with a constant-shifted register as the second operand; the shift is
// folded into ADD's shifted-register form.  Shift counts are masked to
// 0x1f (int) / 0x3f (long) to fit the encoding.

// dst = src1 + (src2 >>> src3) (int)  ==>  addw, LSR
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3) (long)  ==>  add, LSR
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (int)  ==>  addw, ASR
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (long)  ==>  add, ASR
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3) (int)  ==>  addw, LSL
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3) (long)  ==>  add, LSL
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12391 
// Subtract with a constant-shifted register as the subtrahend; the shift is
// folded into SUB's shifted-register form.  Shift counts are masked to
// 0x1f (int) / 0x3f (long) to fit the encoding.

// dst = src1 - (src2 >>> src3) (int)  ==>  subw, LSR
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3) (long)  ==>  sub, LSR
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (int)  ==>  subw, ASR
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (long)  ==>  sub, ASR
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3) (int)  ==>  subw, LSL
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3) (long)  ==>  sub, LSL
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12505 
12506 
12507 
// Shift Left followed by Shift Right (signed, 64-bit).
// This idiom is used by the compiler for the i2b bytecode etc., and maps
// onto a single SBFM (signed bitfield move) instruction.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // Map the (lshift, rshift) pair onto SBFM's (immr, imms) fields:
    // imms = 63 - lshift selects the top of the field, immr rotates it
    // into position.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right (signed, 32-bit).
// This idiom is used by the compiler for the i2b bytecode etc., and maps
// onto a single SBFMW instruction.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // (immr, imms) computation as in sbfmL, but with 32-bit field widths.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right (unsigned, 64-bit).
// This idiom is used by the compiler for the i2b bytecode etc., and maps
// onto a single UBFM (unsigned bitfield move) instruction.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // (immr, imms) computation as in sbfmL; only the sign handling differs.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right (unsigned, 32-bit).
// This idiom is used by the compiler for the i2b bytecode etc., and maps
// onto a single UBFMW instruction.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // (immr, imms) computation as in sbfmwI; only the sign handling differs.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask
//
// (x >>> rshift) & mask, where mask is (2^width - 1), is a single UBFX
// extracting a 'width'-bit field starting at bit 'rshift'.

// 32-bit unsigned bitfield extract: (AndI (URShiftI src rshift) mask).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // immI_bitmask guarantees mask+1 is a power of two, so exact_log2
    // yields the field width.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract: (AndL (URShiftL src rshift) mask).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // immL_bitmask guarantees mask+1 is a power of two (field width).
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends, so the ConvI2L is folded away.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12649 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// The predicate rejects combinations where shift + field width would
// exceed the 32-bit register, which ubfizw cannot encode.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // mask+1 is a power of two (immI_bitmask), so this is the field width.
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI; shift + width must fit in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
// (ubfiz zero-extends, so the ConvI2L is subsumed by the instruction).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12707 
// Rotations
//
// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == register
// width is a single EXTR (extract from register pair).  The predicates
// verify that the two shift counts are complementary; Or and Add are
// equivalent here because the shifted halves have no overlapping bits.

// 64-bit rotate expressed with Or.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit rotate expressed with Or.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 64-bit rotate expressed with Add (same bit pattern as the Or form).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit rotate expressed with Add (same bit pattern as the Or form).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12769 
12770 
// rol expander
//
// AArch64 has no rotate-left instruction; rotate left by N is implemented
// as rotate right by (-N), computed into rscratch1.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // Negate the shift count, then rotate right by it.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander (32-bit form of rolL_rReg)

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the canonical rotate-left idiom (x << s) | (x >>> (64 - s)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 instead of 64; equivalent because shifts are mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom (x << s) | (x >>> (32 - s)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// Same 32-bit idiom with 0 instead of 32 (shifts are mod 32).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12838 
// ror expander
//
// Rotate right maps directly onto RORV, so no shift negation is needed
// (compare with the rol expanders above).

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander (32-bit form of rorL_rReg)

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the canonical rotate-right idiom (x >>> s) | (x << (64 - s)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 instead of 64; equivalent because shifts are mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom (x >>> s) | (x << (32 - s)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// Same 32-bit idiom with 0 instead of 32 (shifts are mod 32).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12904 
// Add/subtract (extended)
//
// AArch64 add/sub accept a sign- or zero-extended register operand, so an
// explicit widening conversion feeding the arithmetic can be folded away.

// (AddL src1 (ConvI2L src2)) => add with sxtw-extended second operand.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// (SubL src1 (ConvI2L src2)) => sub with sxtw-extended second operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12932 
12933 
// Add with an operand narrowed by a shift-left/shift-right pair.
// (x << k) >> k with k = 16/24 (int) or 32/48/56 (long) is a
// sign-extension (RShift) or zero-extension (URShift) of the low bits,
// which folds into the extended-register form of add.

// (x << 16) >> 16 == sign-extend halfword.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (x << 24) >> 24 == sign-extend byte.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (x << 24) >>> 24 == zero-extend byte.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: (x << 48) >> 48 == sign-extend halfword.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: (x << 32) >> 32 == sign-extend word.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: (x << 56) >> 56 == sign-extend byte.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: (x << 56) >>> 56 == zero-extend byte.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13024 
13025 
// Add/subtract with a mask-based zero-extension.  x & 0xff / 0xffff /
// 0xffffffff is a uxtb/uxth/uxtw zero-extension, which folds into the
// extended-register form of add/sub.

// (AddI src1 (AndI src2 0xff)) => addw ..., uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (AddI src1 (AndI src2 0xffff)) => addw ..., uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (AddL src1 (AndL src2 0xff)) => add ..., uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (AddL src1 (AndL src2 0xffff)) => add ..., uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (AddL src1 (AndL src2 0xffffffff)) => add ..., uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (SubI src1 (AndI src2 0xff)) => subw ..., uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (SubI src1 (AndI src2 0xffff)) => subw ..., uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (SubL src1 (AndL src2 0xff)) => sub ..., uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (SubL src1 (AndL src2 0xffff)) => sub ..., uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (SubL src1 (AndL src2 0xffffffff)) => sub ..., uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13155 
13156 
// Add/subtract of a sign-extended operand that is additionally left-shifted
// by a small amount (immIExt limits lshift2 to what the extended-register
// encoding allows).  The inner (x << k) >> k pair is the sign-extension, as
// in the AddExt*_sxt* rules above.

// Long add of sign-extended byte, shifted left by lshift2.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long add of sign-extended halfword, shifted left by lshift2.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long add of sign-extended word, shifted left by lshift2.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of sign-extended byte, shifted left by lshift2.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of sign-extended halfword, shifted left by lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of sign-extended word, shifted left by lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int add of sign-extended byte, shifted left by lshift2.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int add of sign-extended halfword, shifted left by lshift2.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int subtract of sign-extended byte, shifted left by lshift2.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int subtract of sign-extended halfword, shifted left by lshift2.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13286 
13287 
// Long add of a widened int operand, shifted left by lshift.
// (AddL src1 (LShiftL (ConvI2L src2) lshift)) => add ..., sxtw #lshift.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// Long subtract of a widened int operand, shifted left by lshift.
// (SubL src1 (LShiftL (ConvI2L src2) lshift)) => sub ..., sxtw #lshift.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
13313 
13314 
// Add/subtract of a mask-zero-extended operand that is also left-shifted.
// Combines the AddExt*_and and *_shift forms: (x & mask) << lshift folds
// into one extended-register add/sub with a shift amount.

// (AddL src1 ((AndL src2 0xff) << lshift)) => add ..., uxtb #lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// (AddL src1 ((AndL src2 0xffff) << lshift)) => add ..., uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// (AddL src1 ((AndL src2 0xffffffff) << lshift)) => add ..., uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// (SubL src1 ((AndL src2 0xff) << lshift)) => sub ..., uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// (SubL src1 ((AndL src2 0xffff) << lshift)) => sub ..., uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13379 
13380 instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
13381 %{
13382   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
13383   ins_cost(1.9 * INSN_COST);
13384   format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}
13385 
13386    ins_encode %{
13387      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
13388             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
13389    %}
13390   ins_pipe(ialu_reg_reg_shift);
13391 %}
13392 
13393 instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
13394 %{
13395   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
13396   ins_cost(1.9 * INSN_COST);
13397   format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}
13398 
13399    ins_encode %{
13400      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
13401             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
13402    %}
13403   ins_pipe(ialu_reg_reg_shift);
13404 %}
13405 
13406 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
13407 %{
13408   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
13409   ins_cost(1.9 * INSN_COST);
13410   format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
13411 
13412    ins_encode %{
13413      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
13414             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
13415    %}
13416   ins_pipe(ialu_reg_reg_shift);
13417 %}
13418 
13419 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
13420 %{
13421   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13422   ins_cost(1.9 * INSN_COST);
13423   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
13424 
13425    ins_encode %{
13426      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13427             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
13428    %}
13429   ins_pipe(ialu_reg_reg_shift);
13430 %}
13431 
13432 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
13433 %{
13434   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13435   ins_cost(1.9 * INSN_COST);
13436   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
13437 
13438    ins_encode %{
13439      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13440             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
13441    %}
13442   ins_pipe(ialu_reg_reg_shift);
13443 %}
13444 // END This section of the file is automatically generated. Do not edit --------------
13445 
13446 // ============================================================================
13447 // Floating Point Arithmetic Instructions
13448 
// Scalar float add: AddF -> fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Scalar double add: AddD -> faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13478 
// Scalar float subtract: SubF -> fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Scalar double subtract: SubD -> fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13508 
// Scalar float multiply: MulF -> fmuls.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Scalar double multiply: MulD -> fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13538 
// Fused multiply-add (only selected when UseFMA is on):
// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13572 
// Fused multiply-subtract; the two match rules absorb the negation on
// either multiplicand, since (-a)*b == a*(-b).
// -src1 * src2 + src3
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13608 
// Negated fused multiply-add; both addend and one multiplicand are negated.
// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13644 
// src1 * src2 - src3
// NOTE(review): the 'zero' operand is never referenced by the match rule or
// the encoding — presumably a leftover; confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): 'zero' is unused here as well, mirroring mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13679 
13680 
// Scalar float divide: DivF -> fdivs. Higher cost reflects divide latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Scalar double divide: DivD -> fdivd. Costlier still than float divide.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13710 
// Scalar float negate: NegF -> fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fix: format previously printed "fneg", but the encoding emits fnegs
  // (cf. negD_reg_reg, which prints "fnegd").
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13724 
// Scalar double negate: NegD -> fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13738 
// Scalar float absolute value: AbsF -> fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Scalar double absolute value: AbsD -> fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13764 
// Scalar double square root: SqrtD -> fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Fix: pipeline class was fp_div_s; the double-precision sqrt belongs on
  // the double-precision divide/sqrt pipe, matching divD_reg_reg (it was
  // swapped with sqrtF_reg's class).
  ins_pipe(fp_div_d);
%}
13777 
// Scalar float square root, matched from the ideal-graph shape
// ConvD2F(SqrtD(ConvF2D src)) and collapsed to a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Fix: pipeline class was fp_div_d; single-precision sqrt belongs on the
  // single-precision divide/sqrt pipe, matching divF_reg_reg.
  ins_pipe(fp_div_s);
%}
13790 
13791 // ============================================================================
13792 // Logical Instructions
13793 
13794 // Integer Logical Instructions
13795 
13796 // And Instructions
13797 
13798 
// 32-bit bitwise AND, register-register: AndI -> andw.
// NOTE(review): the cr operand is declared but has no effect() and the
// encoding does not set flags — presumably a matcher convention; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13813 
// 32-bit bitwise AND with a logical immediate: AndI -> andw.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fix: format previously printed "andsw" (the flag-setting form), but the
  // encoding emits plain andw, matching andI_reg_reg.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13828 
13829 // Or Instructions
13830 
// 32-bit bitwise OR, register-register: OrI -> orrw.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical immediate: OrI -> orrw.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13860 
13861 // Xor Instructions
13862 
// 32-bit bitwise XOR, register-register: XorI -> eorw.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical immediate: XorI -> eorw.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13892 
13893 // Long Logical Instructions
13894 // TODO
13895 
// 64-bit bitwise AND, register-register: AndL -> andr.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fix: format comment said "# int"; this is the 64-bit (long) pattern.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13910 
// 64-bit bitwise AND with a logical immediate: AndL -> andr.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fix: format comment said "# int"; this is the 64-bit (long) pattern.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13925 
13926 // Or Instructions
13927 
// 64-bit bitwise OR, register-register: OrL -> orr.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fix: format comment said "# int"; this is the 64-bit (long) pattern.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13942 
// 64-bit bitwise OR with a logical immediate: OrL -> orr.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fix: format comment said "# int"; this is the 64-bit (long) pattern.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13957 
13958 // Xor Instructions
13959 
// 64-bit bitwise XOR, register-register: XorL -> eor.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fix: format comment said "# int"; this is the 64-bit (long) pattern.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13974 
// 64-bit bitwise XOR with a logical immediate: XorL -> eor.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Fix: format comment said "# int"; this is the 64-bit (long) pattern.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13989 
// int -> long sign extension: sbfm dst, src, 0, 31 (the sxtw alias).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14001 
// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (long)src & 0xFFFFFFFF collapses to a zero
// extension via ubfm dst, src, 0, 31.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14015 
// long -> int narrowing: a 32-bit register move (movw) keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14028 
// int -> boolean: dst = (src != 0), via compare against zr then cset.
// Clobbers the flags register (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14046 
// pointer -> boolean: dst = (src != NULL), 64-bit compare then cset.
// Clobbers the flags register (KILL cr).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14064 
// double -> float narrowing conversion: fcvtd.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion: fcvts.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14090 
// float -> int: signed convert with round-toward-zero (fcvtzsw).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: signed convert with round-toward-zero (fcvtzs).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14116 
// int -> float: signed integer convert (scvtfws).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float: signed integer convert (scvtfs).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
14142 
// double -> int: signed convert with round-toward-zero (fcvtzdw).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: signed convert with round-toward-zero (fcvtzd).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
14168 
// int -> double: signed integer convert (scvtfwd).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double: signed integer convert (scvtfd).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14194 
14195 // stack <-> reg and reg <-> reg shuffles with no conversion
14196 
// Reinterpret float bits from a stack slot into an int register (ldrw).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret int bits from a stack slot into a float register (ldrs).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14232 
// Reinterpret double bits from a stack slot into a long register (ldr).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret long bits from a stack slot into a double register (ldrd).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14268 
// Spill float register bits to an int stack slot (strs).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill int register bits to a float stack slot (strw).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14304 
// Spill double register bits to a long stack slot (strd).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fix: format printed "$dst, $src", but the encoding stores $src to $dst;
  // siblings (MoveF2I_reg_stack, MoveI2F_reg_stack) print "$src, $dst".
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14322 
// Spill long register bits to a double stack slot (str).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14340 
// Bit-for-bit move FP -> GP register (fmovs), no conversion.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-for-bit move GP -> FP register (fmovs), no conversion.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
14376 
// Bit-for-bit move FP -> GP register, 64-bit (fmovd), no conversion.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-for-bit move GP -> FP register, 64-bit (fmovd), no conversion.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14412 
14413 // ============================================================================
14414 // clearing of an array
14415 
// Zero an array given a variable word count; pins cnt to r11 and base to
// r10 (both clobbered by the zero_words stub call).
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
14430 
14431 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14432 %{
14433   predicate((u_int64_t)n->in(2)->get_long()
14434             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
14435   match(Set dummy (ClearArray cnt base));
14436   effect(USE_KILL base);
14437 
14438   ins_cost(4 * INSN_COST);
14439   format %{ "ClearArray $cnt, $base" %}
14440 
14441   ins_encode %{
14442     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
14443   %}
14444 
14445   ins_pipe(pipe_class_memory);
14446 %}
14447 
14448 // ============================================================================
14449 // Overflow Math Instructions
14450 
14451 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14452 %{
14453   match(Set cr (OverflowAddI op1 op2));
14454 
14455   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14456   ins_cost(INSN_COST);
14457   ins_encode %{
14458     __ cmnw($op1$$Register, $op2$$Register);
14459   %}
14460 
14461   ins_pipe(icmp_reg_reg);
14462 %}
14463 
14464 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14465 %{
14466   match(Set cr (OverflowAddI op1 op2));
14467 
14468   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14469   ins_cost(INSN_COST);
14470   ins_encode %{
14471     __ cmnw($op1$$Register, $op2$$constant);
14472   %}
14473 
14474   ins_pipe(icmp_reg_imm);
14475 %}
14476 
14477 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14478 %{
14479   match(Set cr (OverflowAddL op1 op2));
14480 
14481   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14482   ins_cost(INSN_COST);
14483   ins_encode %{
14484     __ cmn($op1$$Register, $op2$$Register);
14485   %}
14486 
14487   ins_pipe(icmp_reg_reg);
14488 %}
14489 
14490 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14491 %{
14492   match(Set cr (OverflowAddL op1 op2));
14493 
14494   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14495   ins_cost(INSN_COST);
14496   ins_encode %{
14497     __ cmn($op1$$Register, $op2$$constant);
14498   %}
14499 
14500   ins_pipe(icmp_reg_imm);
14501 %}
14502 
14503 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14504 %{
14505   match(Set cr (OverflowSubI op1 op2));
14506 
14507   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
14508   ins_cost(INSN_COST);
14509   ins_encode %{
14510     __ cmpw($op1$$Register, $op2$$Register);
14511   %}
14512 
14513   ins_pipe(icmp_reg_reg);
14514 %}
14515 
14516 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14517 %{
14518   match(Set cr (OverflowSubI op1 op2));
14519 
14520   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
14521   ins_cost(INSN_COST);
14522   ins_encode %{
14523     __ cmpw($op1$$Register, $op2$$constant);
14524   %}
14525 
14526   ins_pipe(icmp_reg_imm);
14527 %}
14528 
14529 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14530 %{
14531   match(Set cr (OverflowSubL op1 op2));
14532 
14533   format %{ "cmp   $op1, $op2\t# overflow check long" %}
14534   ins_cost(INSN_COST);
14535   ins_encode %{
14536     __ cmp($op1$$Register, $op2$$Register);
14537   %}
14538 
14539   ins_pipe(icmp_reg_reg);
14540 %}
14541 
14542 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14543 %{
14544   match(Set cr (OverflowSubL op1 op2));
14545 
14546   format %{ "cmp   $op1, $op2\t# overflow check long" %}
14547   ins_cost(INSN_COST);
14548   ins_encode %{
14549     __ cmp($op1$$Register, $op2$$constant);
14550   %}
14551 
14552   ins_pipe(icmp_reg_imm);
14553 %}
14554 
14555 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
14556 %{
14557   match(Set cr (OverflowSubI zero op1));
14558 
14559   format %{ "cmpw  zr, $op1\t# overflow check int" %}
14560   ins_cost(INSN_COST);
14561   ins_encode %{
14562     __ cmpw(zr, $op1$$Register);
14563   %}
14564 
14565   ins_pipe(icmp_reg_imm);
14566 %}
14567 
14568 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
14569 %{
14570   match(Set cr (OverflowSubL zero op1));
14571 
14572   format %{ "cmp   zr, $op1\t# overflow check long" %}
14573   ins_cost(INSN_COST);
14574   ins_encode %{
14575     __ cmp(zr, $op1$$Register);
14576   %}
14577 
14578   ins_pipe(icmp_reg_imm);
14579 %}
14580 
// Multiply-overflow checks.  AArch64 multiplies do not set flags, so the
// wide product is computed and compared against the sign-extension of its
// low half; any difference means the product did not fit.  The standalone
// (flag-producing) forms then synthesize the V flag for a generic VS/VC
// consumer; the fused branch forms branch directly on NE/EQ instead.

instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    // 64-bit signed product of the two 32-bit inputs.
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  // Only overflow/no_overflow tests can be mapped onto the NE/EQ branch below.
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // Translate the VS/VC test of the ideal graph into NE/EQ on the
    // comparison performed above.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  // Only overflow/no_overflow tests can be mapped onto the NE/EQ branch below.
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14670 
14671 // ============================================================================
14672 // Compare Instructions
14673 
14674 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14675 %{
14676   match(Set cr (CmpI op1 op2));
14677 
14678   effect(DEF cr, USE op1, USE op2);
14679 
14680   ins_cost(INSN_COST);
14681   format %{ "cmpw  $op1, $op2" %}
14682 
14683   ins_encode(aarch64_enc_cmpw(op1, op2));
14684 
14685   ins_pipe(icmp_reg_reg);
14686 %}
14687 
14688 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14689 %{
14690   match(Set cr (CmpI op1 zero));
14691 
14692   effect(DEF cr, USE op1);
14693 
14694   ins_cost(INSN_COST);
14695   format %{ "cmpw $op1, 0" %}
14696 
14697   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14698 
14699   ins_pipe(icmp_reg_imm);
14700 %}
14701 
14702 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14703 %{
14704   match(Set cr (CmpI op1 op2));
14705 
14706   effect(DEF cr, USE op1);
14707 
14708   ins_cost(INSN_COST);
14709   format %{ "cmpw  $op1, $op2" %}
14710 
14711   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14712 
14713   ins_pipe(icmp_reg_imm);
14714 %}
14715 
14716 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14717 %{
14718   match(Set cr (CmpI op1 op2));
14719 
14720   effect(DEF cr, USE op1);
14721 
14722   ins_cost(INSN_COST * 2);
14723   format %{ "cmpw  $op1, $op2" %}
14724 
14725   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14726 
14727   ins_pipe(icmp_reg_imm);
14728 %}
14729 
14730 // Unsigned compare Instructions; really, same as signed compare
14731 // except it should only be used to feed an If or a CMovI which takes a
14732 // cmpOpU.
14733 
14734 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
14735 %{
14736   match(Set cr (CmpU op1 op2));
14737 
14738   effect(DEF cr, USE op1, USE op2);
14739 
14740   ins_cost(INSN_COST);
14741   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14742 
14743   ins_encode(aarch64_enc_cmpw(op1, op2));
14744 
14745   ins_pipe(icmp_reg_reg);
14746 %}
14747 
14748 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
14749 %{
14750   match(Set cr (CmpU op1 zero));
14751 
14752   effect(DEF cr, USE op1);
14753 
14754   ins_cost(INSN_COST);
14755   format %{ "cmpw $op1, #0\t# unsigned" %}
14756 
14757   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14758 
14759   ins_pipe(icmp_reg_imm);
14760 %}
14761 
14762 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
14763 %{
14764   match(Set cr (CmpU op1 op2));
14765 
14766   effect(DEF cr, USE op1);
14767 
14768   ins_cost(INSN_COST);
14769   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14770 
14771   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14772 
14773   ins_pipe(icmp_reg_imm);
14774 %}
14775 
14776 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
14777 %{
14778   match(Set cr (CmpU op1 op2));
14779 
14780   effect(DEF cr, USE op1);
14781 
14782   ins_cost(INSN_COST * 2);
14783   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14784 
14785   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14786 
14787   ins_pipe(icmp_reg_imm);
14788 %}
14789 
14790 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14791 %{
14792   match(Set cr (CmpL op1 op2));
14793 
14794   effect(DEF cr, USE op1, USE op2);
14795 
14796   ins_cost(INSN_COST);
14797   format %{ "cmp  $op1, $op2" %}
14798 
14799   ins_encode(aarch64_enc_cmp(op1, op2));
14800 
14801   ins_pipe(icmp_reg_reg);
14802 %}
14803 
14804 instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
14805 %{
14806   match(Set cr (CmpL op1 zero));
14807 
14808   effect(DEF cr, USE op1);
14809 
14810   ins_cost(INSN_COST);
14811   format %{ "tst  $op1" %}
14812 
14813   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
14814 
14815   ins_pipe(icmp_reg_imm);
14816 %}
14817 
14818 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
14819 %{
14820   match(Set cr (CmpL op1 op2));
14821 
14822   effect(DEF cr, USE op1);
14823 
14824   ins_cost(INSN_COST);
14825   format %{ "cmp  $op1, $op2" %}
14826 
14827   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
14828 
14829   ins_pipe(icmp_reg_imm);
14830 %}
14831 
14832 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
14833 %{
14834   match(Set cr (CmpL op1 op2));
14835 
14836   effect(DEF cr, USE op1);
14837 
14838   ins_cost(INSN_COST * 2);
14839   format %{ "cmp  $op1, $op2" %}
14840 
14841   ins_encode(aarch64_enc_cmp_imm(op1, op2));
14842 
14843   ins_pipe(icmp_reg_imm);
14844 %}
14845 
14846 instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
14847 %{
14848   match(Set cr (CmpUL op1 op2));
14849 
14850   effect(DEF cr, USE op1, USE op2);
14851 
14852   ins_cost(INSN_COST);
14853   format %{ "cmp  $op1, $op2" %}
14854 
14855   ins_encode(aarch64_enc_cmp(op1, op2));
14856 
14857   ins_pipe(icmp_reg_reg);
14858 %}
14859 
14860 instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
14861 %{
14862   match(Set cr (CmpUL op1 zero));
14863 
14864   effect(DEF cr, USE op1);
14865 
14866   ins_cost(INSN_COST);
14867   format %{ "tst  $op1" %}
14868 
14869   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
14870 
14871   ins_pipe(icmp_reg_imm);
14872 %}
14873 
14874 instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
14875 %{
14876   match(Set cr (CmpUL op1 op2));
14877 
14878   effect(DEF cr, USE op1);
14879 
14880   ins_cost(INSN_COST);
14881   format %{ "cmp  $op1, $op2" %}
14882 
14883   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
14884 
14885   ins_pipe(icmp_reg_imm);
14886 %}
14887 
14888 instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
14889 %{
14890   match(Set cr (CmpUL op1 op2));
14891 
14892   effect(DEF cr, USE op1);
14893 
14894   ins_cost(INSN_COST * 2);
14895   format %{ "cmp  $op1, $op2" %}
14896 
14897   ins_encode(aarch64_enc_cmp_imm(op1, op2));
14898 
14899   ins_pipe(icmp_reg_imm);
14900 %}
14901 
// Pointer and compressed-pointer compares.  Pointer comparisons are
// unsigned (addresses), hence rFlagsRegU.

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null tests: compare a (compressed) pointer against the null constant.

instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14957 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    // Single-precision compare; sets NZCV (unordered => overflow-style flags).
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14976 
// Compare a float register against the constant 0.0f using the
// zero-operand form of FCMP (no register needs to hold the constant).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Use the standard '0.0' literal: the 'D' suffix (0.0D) is a
    // non-standard compiler extension, and the parameter is a double
    // either way, so plain 0.0 is equivalent and portable.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double (64-bit) floating-point comparisons.

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    // Double-precision compare; sets NZCV (unordered handled via flag combo).
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
15005 
// Compare a double register against the constant 0.0 using the
// zero-operand form of FCMP (no register needs to hold the constant).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Use the standard '0.0' literal instead of the non-standard
    // 'D'-suffixed form (0.0D); the value and type are identical.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15019 
// Three-way float compare (CmpF3): produces -1 / 0 / +1 in a GP register,
// with unordered treated as "less" (-1), matching fcmpl semantics.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previous version declared and bound a 'Label done' that was
    // never branched to; the dead label has been removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15047 
// Three-way double compare (CmpD3): produces -1 / 0 / +1 in a GP register,
// with unordered treated as "less" (-1), matching dcmpl semantics.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previous version declared and bound a 'Label done' that was
    // never branched to; the dead label has been removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15074 
// Three-way float compare against constant 0.0f: -1 / 0 / +1 in a GP
// register, unordered treated as "less" (-1).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead 'Label done' removed (it was bound but never branched to).
    // Standard '0.0' literal replaces the non-standard 0.0D suffix form.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15101 
// Three-way double compare against constant 0.0: -1 / 0 / +1 in a GP
// register, unordered treated as "less" (-1).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead 'Label done' removed (it was bound but never branched to).
    // Standard '0.0' literal replaces the non-standard 0.0D suffix form.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15127 
// CmpLTMask: dst = (p < q) ? -1 : 0 (an all-ones/all-zeros mask).

instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if p < q else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate: 1 -> -1 (all ones), 0 -> 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: the sign bit is the answer, so a single
// arithmetic shift right by 31 produces the mask directly.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15164 
15165 // ============================================================================
15166 // Max and Min
15167 
15168 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
15169 %{
15170   match(Set dst (MinI src1 src2));
15171 
15172   effect(DEF dst, USE src1, USE src2, KILL cr);
15173   size(8);
15174 
15175   ins_cost(INSN_COST * 3);
15176   format %{
15177     "cmpw $src1 $src2\t signed int\n\t"
15178     "cselw $dst, $src1, $src2 lt\t"
15179   %}
15180 
15181   ins_encode %{
15182     __ cmpw(as_Register($src1$$reg),
15183             as_Register($src2$$reg));
15184     __ cselw(as_Register($dst$$reg),
15185              as_Register($src1$$reg),
15186              as_Register($src2$$reg),
15187              Assembler::LT);
15188   %}
15189 
15190   ins_pipe(ialu_reg_reg);
15191 %}
// MaxI: same compare/conditional-select pattern with the GT condition.

// MaxI: compare then conditional-select the larger operand (branchless).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 > src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15218 
15219 // ============================================================================
15220 // Branch Instructions
15221 
15222 // Direct Branch.
15223 instruct branch(label lbl)
15224 %{
15225   match(Goto);
15226 
15227   effect(USE lbl);
15228 
15229   ins_cost(BRANCH_COST);
15230   format %{ "b  $lbl" %}
15231 
15232   ins_encode(aarch64_enc_b(lbl));
15233 
15234   ins_pipe(pipe_branch);
15235 %}
15236 
15237 // Conditional Near Branch
15238 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
15239 %{
15240   // Same match rule as `branchConFar'.
15241   match(If cmp cr);
15242 
15243   effect(USE lbl);
15244 
15245   ins_cost(BRANCH_COST);
15246   // If set to 1 this indicates that the current instruction is a
15247   // short variant of a long branch. This avoids using this
15248   // instruction in first-pass matching. It will then only be used in
15249   // the `Shorten_branches' pass.
15250   // ins_short_branch(1);
15251   format %{ "b$cmp  $lbl" %}
15252 
15253   ins_encode(aarch64_enc_br_con(cmp, lbl));
15254 
15255   ins_pipe(pipe_branch_cond);
15256 %}
15257 
15258 // Conditional Near Branch Unsigned
15259 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
15260 %{
15261   // Same match rule as `branchConFar'.
15262   match(If cmp cr);
15263 
15264   effect(USE lbl);
15265 
15266   ins_cost(BRANCH_COST);
15267   // If set to 1 this indicates that the current instruction is a
15268   // short variant of a long branch. This avoids using this
15269   // instruction in first-pass matching. It will then only be used in
15270   // the `Shorten_branches' pass.
15271   // ins_short_branch(1);
15272   format %{ "b$cmp  $lbl\t# unsigned" %}
15273 
15274   ins_encode(aarch64_enc_br_conU(cmp, lbl));
15275 
15276   ins_pipe(pipe_branch_cond);
15277 %}
15278 
// Make use of CBZ and CBNZ.  These instructions, as well as being
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.

instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // cmpOpEqNe restricts the matched test to EQ/NE, so this dispatch
    // is exhaustive.
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// A narrow oop is zero iff the decoded oop is null, so the DecodeN can
// be elided and the compressed form tested directly with cbzw/cbnzw.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned compares against zero: u < 0 is never true and u >= 0 always
// is, so the four tests EQ/NE/LT/GE collapse onto cbz/cbnz.  After an
// unsigned compare, EQ maps to LS ("lower or same" with 0 == equal 0),
// which is why LS is accepted alongside EQ below.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15401 
15402 // Test bit and Branch
15403 
15404 // Patterns for short (< 32KiB) variants
15405 instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
15406   match(If cmp (CmpL op1 op2));
15407   effect(USE labl);
15408 
15409   ins_cost(BRANCH_COST);
15410   format %{ "cb$cmp   $op1, $labl # long" %}
15411   ins_encode %{
15412     Label* L = $labl$$label;
15413     Assembler::Condition cond =
15414       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
15415     __ tbr(cond, $op1$$Register, 63, *L);
15416   %}
15417   ins_pipe(pipe_cmp_branch);
15418   ins_short_branch(1);
15419 %}
15420 
15421 instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
15422   match(If cmp (CmpI op1 op2));
15423   effect(USE labl);
15424 
15425   ins_cost(BRANCH_COST);
15426   format %{ "cb$cmp   $op1, $labl # int" %}
15427   ins_encode %{
15428     Label* L = $labl$$label;
15429     Assembler::Condition cond =
15430       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
15431     __ tbr(cond, $op1$$Register, 31, *L);
15432   %}
15433   ins_pipe(pipe_cmp_branch);
15434   ins_short_branch(1);
15435 %}
15436 
15437 instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
15438   match(If cmp (CmpL (AndL op1 op2) op3));
15439   predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
15440   effect(USE labl);
15441 
15442   ins_cost(BRANCH_COST);
15443   format %{ "tb$cmp   $op1, $op2, $labl" %}
15444   ins_encode %{
15445     Label* L = $labl$$label;
15446     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15447     int bit = exact_log2($op2$$constant);
15448     __ tbr(cond, $op1$$Register, bit, *L);
15449   %}
15450   ins_pipe(pipe_cmp_branch);
15451   ins_short_branch(1);
15452 %}
15453 
// Branch on a single bit of an int: matches (op1 & op2) EQ/NE 0 where the
// mask op2 has exactly one bit set, and emits one test-bit-and-branch.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  // Only applies when the AND mask is a power of two (a single bit).
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);  // index of the single set bit
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);  // short (near) branch variant
%}
15470 
15471 // And far variants
// Far variant of the long sign-bit branch: same bit-63 test, but passes
// far=true so the target may be beyond the short branch displacement range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT (sign bit set) => NE bit-test; GE => EQ bit-test.
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15486 
// Far variant of the int sign-bit branch: same bit-31 test, but passes
// far=true so the target may be beyond the short branch displacement range.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT (sign bit set) => NE bit-test; GE => EQ bit-test.
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15501 
// Far variant of the long single-bit branch: same power-of-two mask test,
// but passes far=true for targets out of short-branch range.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  // Only applies when the AND mask is a power of two (a single bit).
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);  // index of the single set bit
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15517 
// Far variant of the int single-bit branch: same power-of-two mask test,
// but passes far=true for targets out of short-branch range.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  // Only applies when the AND mask is a power of two (a single bit).
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);  // index of the single set bit
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15533 
15534 // Test bits
15535 
// Set flags from (op1 & op2) compared with zero, long form, where op2 can
// be encoded as a 64-bit logical immediate: emitted as a single TST.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  // The mask must be a valid AArch64 logical immediate (64-bit form).
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15548 
// Set flags from (op1 & op2) compared with zero, int form, where op2 can
// be encoded as a 32-bit logical immediate: emitted as a single TSTW.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  // The mask must be a valid AArch64 logical immediate (32-bit form).
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15561 
// Set flags from (op1 & op2) compared with zero, long form with a register
// mask: emitted as TST reg, reg.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15572 
// Set flags from (op1 & op2) compared with zero, int form with a register
// mask: emitted as TSTW reg, reg.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15583 
15584 
15585 // Conditional Far Branch
15586 // Conditional Far Branch Unsigned
15587 // TODO: fixme
15588 
15589 // counted loop end branch near
// Conditional branch closing a counted loop, near form, signed condition
// codes.  Encoded via the shared aarch64_enc_br_con encoding.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15605 
15606 // counted loop end branch near Unsigned
// Conditional branch closing a counted loop, near form, unsigned condition
// codes.  Encoded via the shared aarch64_enc_br_conU encoding.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15622 
15623 // counted loop end branch far
15624 // counted loop end branch far unsigned
15625 // TODO: fixme
15626 
15627 // ============================================================================
15628 // inlined locking and unlocking
15629 
// Inlined monitor-enter fast path (FastLock).  Produces flags consumed by
// the following branch; tmp and tmp2 are clobbered scratch registers.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15644 
// Inlined monitor-exit fast path (FastUnlock).  Produces flags consumed by
// the following branch; tmp and tmp2 are clobbered scratch registers.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15657 
15658 
15659 // ============================================================================
15660 // Safepoint Instructions
15661 
15662 // TODO
15663 // provide a near and far version of this code
15664 
// Safepoint poll: reads the polling page through $poll with a poll_type
// relocation; the read faults when the VM arms the page for a safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15677 
15678 
15679 // ============================================================================
15680 // Procedure Call/Return Instructions
15681 
15682 // Call Java Static Instruction
15683 
// Direct call to a statically-bound Java method, followed by the shared
// call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15699 
15700 // TO HERE
15701 
15702 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache call), followed by the
// shared call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15718 
15719 // Call Runtime Instruction
15720 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15735 
15736 // Call Runtime Instruction
15737 
// Leaf runtime call (no Java frame / safepoint interaction at the callee);
// uses the same java-to-runtime encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15752 
15753 // Call Runtime Instruction
15754 
// Leaf runtime call that does not use floating point arguments/results;
// uses the same java-to-runtime encoding.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15769 
15770 // Tail Call; Jump from runtime stub to Java code.
15771 // Also known as an 'interprocedural jump'.
15772 // Target of jump will eventually return to caller.
15773 // TailJump below removes the return address.
// Tail call: indirect jump from a runtime stub into Java code; the method
// oop travels in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15786 
// Tail jump used for exception dispatch: indirect jump with the exception
// oop pinned in r0.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15799 
15800 // Create exception oop: created by stack-crawling runtime code.
15801 // Created exception is now available to this handler, and is setup
15802 // just prior to jumping to this handler. No code emitted.
15803 // TODO check
15804 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size node: purely informs the register allocator that r0 holds
  // the exception oop on entry.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15817 
15818 // Rethrow exception: The exception oop will come in the first
15819 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15830 
15831 
15832 // Return Instruction
15833 // epilog node loads ret address into lr as part of frame pop
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15844 
15845 // Die now.
// Die now: matches the Halt node and emits a trapping instruction so that
// execution can never fall through code C2 proved unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    // Note: the Assembler method is dcps1 (the AArch64 DCPS1 debug-state
    // instruction); "dpcs1" was a typo and does not exist.
    __ dcps1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15860 
15861 // ============================================================================
15862 // Partial Subtype Check
15863 //
// Does a linear scan of the secondary-superklass array for a match with
// the superklass.  Set a hidden
15865 // internal cache on a hit (cache is checked with exposed code in
15866 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15867 // encoding ALSO sets flags.
15868 
// Partial subtype check producing a result register: zero on a hit,
// non-zero on a miss.  Also sets flags; temp is clobbered.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15883 
// Partial subtype check used only for its flags (compared against zero);
// result and temp registers are clobbered but their values are unused.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15898 
// String.compareTo intrinsic, both strings UTF-16 (UU encoding); no vector
// temps needed, so fnoreg is passed for the three FP scratch slots.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15916 
// String.compareTo intrinsic, both strings Latin-1 (LL encoding); no vector
// temps needed, so fnoreg is passed for the three FP scratch slots.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15933 
// String.compareTo intrinsic, str1 UTF-16 vs str2 Latin-1 (UL encoding);
// the mixed-width compare additionally needs three vector scratch regs.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15953 
// String.compareTo intrinsic, str1 Latin-1 vs str2 UTF-16 (LU encoding);
// the mixed-width compare additionally needs three vector scratch regs.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15973 
// String.indexOf intrinsic, both strings UTF-16 (UU); variable-length
// needle, so the constant-count argument is -1.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15994 
// String.indexOf intrinsic, both strings Latin-1 (LL); variable-length
// needle, so the constant-count argument is -1.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16015 
// String.indexOf intrinsic, UTF-16 haystack with Latin-1 needle (UL);
// variable-length needle, so the constant-count argument is -1.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16036 
// String.indexOf intrinsic, UU encoding, with a small compile-time-constant
// needle length (<= 4): the constant is passed instead of a count register.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // cnt2 register slot and the two spare temps are zr: the needle length
    // is the constant icnt2.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16057 
// String.indexOf intrinsic, LL encoding, with a small compile-time-constant
// needle length (<= 4): the constant is passed instead of a count register.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // cnt2 register slot and the two spare temps are zr: the needle length
    // is the constant icnt2.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16078 
// String.indexOf intrinsic, UL encoding, with a compile-time-constant
// needle length of exactly 1 (immI_1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // cnt2 register slot and the two spare temps are zr: the needle length
    // is the constant icnt2.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16099 
// indexOf(char) intrinsic: search a UTF-16 char array for a single char.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16117 
// String.equals intrinsic, Latin-1 (LL): element size 1 byte.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16133 
// String.equals intrinsic, UTF-16 (UU): element size 2 bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16149 
// Arrays.equals intrinsic for byte[] (LL encoding): element size 1 byte.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
16166 
// Arrays.equals intrinsic for char[] (UU encoding): element size 2 bytes.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16183 
// hasNegatives intrinsic: scan a byte[] for any byte with the sign bit set.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16194 
16195 // fast char[] to byte[] compression
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    // Uses four vector scratch registers; result reports the outcome of
    // the compression (see MacroAssembler::char_array_compress).
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16213 
16214 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Widens Latin-1 bytes in src to UTF-16 chars in dst.
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16228 
16229 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    // result receives the number of characters encoded (see
    // MacroAssembler::encode_iso_array).
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16247 
16248 // ============================================================================
16249 // This name is KNOWN by the ADLC and cannot be changed.
16250 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16251 // for this guy.
// ThreadLocal: the current-thread pointer already lives in the dedicated
// thread register (thread_RegP), so this node emits no code (size 0).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16266 
16267 // ====================VECTOR INSTRUCTIONS=====================================
16268 
16269 // Load vector (32 bits)
// Load vector (32 bits) into the low part of a D-register (LDR Svt).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16279 
16280 // Load vector (64 bits)
16281 instruct loadV8(vecD dst, vmem8 mem)
16282 %{
16283   predicate(n->as_LoadVector()->memory_size() == 8);
16284   match(Set dst (LoadVector mem));
16285   ins_cost(4 * INSN_COST);
16286   format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
16287   ins_encode( aarch64_enc_ldrvD(dst, mem) );
16288   ins_pipe(vload_reg_mem64);
16289 %}
16290 
16291 // Load Vector (128 bits)
16292 instruct loadV16(vecX dst, vmem16 mem)
16293 %{
16294   predicate(n->as_LoadVector()->memory_size() == 16);
16295   match(Set dst (LoadVector mem));
16296   ins_cost(4 * INSN_COST);
16297   format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
16298   ins_encode( aarch64_enc_ldrvQ(dst, mem) );
16299   ins_pipe(vload_reg_mem128);
16300 %}
16301 
// Vector stores — mirror images of the loads above, selected on the
// StoreVector node's memory size.

// Store Vector (32 bits)
// Store the low 32 bits of a D register (str Sn).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
// Store a full D register (str Dn).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
// Store a full Q register (str Qn).
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16334 
// Replicate (broadcast) a scalar into every lane of a vector, byte and
// halfword element sizes. The "smaller" vector lengths (4B, 2S) also match
// the 64-bit forms: a shorter vector simply occupies the low lanes of the
// D register.

// Broadcast the low byte of a GP register into all 8 byte lanes (D reg).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Broadcast the low byte of a GP register into all 16 byte lanes (Q reg).
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Broadcast an immediate byte; only the low 8 bits of $con are used.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Broadcast an immediate byte into 16 lanes; low 8 bits of $con only.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Broadcast the low halfword of a GP register into 4 H lanes (also 2S case).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Broadcast the low halfword of a GP register into 8 H lanes (Q reg).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Broadcast an immediate halfword; only the low 16 bits of $con are used.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Broadcast an immediate halfword into 8 H lanes; low 16 bits of $con only.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16434 
// Replicate for 32-bit and 64-bit integer lanes.

// Broadcast a GP-register word into 2 S lanes (D reg).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Broadcast a GP-register word into 4 S lanes (Q reg).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Broadcast an immediate word into 2 S lanes.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Broadcast an immediate word into 4 S lanes.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Broadcast a 64-bit GP register into both D lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// All-zero 128-bit vector. Matches ReplicateI of constant 0 — presumably
// the ideal graph expresses a zero 2L vector this way; TODO(review) confirm.
// NOTE(review): the format string shows "movi" but the encoding actually
// zeroes the register with "eor $dst,$dst,$dst" — debug-output mismatch only.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16508 
// Replicate for floating-point lanes: broadcast lane 0 of an FP register.

// Broadcast a float into 2 S lanes (D reg).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Broadcast a float into 4 S lanes (Q reg).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Broadcast a double into both D lanes (Q reg).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16547 
16548 // ====================REDUCTION ARITHMETIC====================================
16549 
16550 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
16551 %{
16552   match(Set dst (AddReductionVI src1 src2));
16553   ins_cost(INSN_COST);
16554   effect(TEMP tmp, TEMP tmp2);
16555   format %{ "umov  $tmp, $src2, S, 0\n\t"
16556             "umov  $tmp2, $src2, S, 1\n\t"
16557             "addw  $dst, $src1, $tmp\n\t"
16558             "addw  $dst, $dst, $tmp2\t add reduction2i"
16559   %}
16560   ins_encode %{
16561     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
16562     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
16563     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
16564     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
16565   %}
16566   ins_pipe(pipe_class_default);
16567 %}
16568 
// Integer add-reduction, 4 lanes: dst = src1 + sum(src2[0..3]).
// Uses the SIMD addv cross-lane sum into $tmp, extracts lane 0, adds src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16586 
// Integer mul-reduction, 2 lanes: dst = src1 * src2[0] * src2[1].
// Lanes are extracted one at a time and multiplied in GP registers.
// dst is TEMP because it is written before the last input is consumed.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16605 
// Integer mul-reduction, 4 lanes.
// Step 1: ins copies the high D half of $src2 into $tmp's low half,
//         so $tmp = {src2[2], src2[3]}.
// Step 2: mulv T2S gives $tmp = {src2[0]*src2[2], src2[1]*src2[3]}.
// Step 3: the two partial products are extracted and folded with src1
//         in GP registers: dst = src1 * tmp[0] * tmp[1].
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16630 
// Float add-reduction, 2 lanes: dst = (src1 + src2[0]) + src2[1].
// Adds are performed strictly in lane order with scalar fadds — float
// addition is not associative, so no cross-lane addv is used here.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16650 
// Float add-reduction, 4 lanes: dst = ((((src1 + s2[0]) + s2[1]) + s2[2]) + s2[3]).
// Each lane is moved to $tmp lane 0 with ins, then accumulated with a
// scalar fadds — preserving strict left-to-right FP evaluation order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16682 
// Float mul-reduction, 2 lanes: dst = (src1 * src2[0]) * src2[1].
// Multiplies are performed strictly in lane order with scalar fmuls.
// Fix: the trailing format comment previously said "add reduction4f",
// copy-pasted from reduce_add4F; corrected to "mul reduction2f"
// (debug/disassembly output only — no change to emitted code).
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16702 
// Float mul-reduction, 4 lanes: dst = (((src1 * s2[0]) * s2[1]) * s2[2]) * s2[3].
// Each lane is moved to $tmp lane 0 with ins, then folded with scalar fmuls.
// Fix: the trailing format comment previously said "add reduction4f",
// copy-pasted from reduce_add4F; corrected to "mul reduction4f"
// (debug/disassembly output only — no change to emitted code).
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16734 
// Double add-reduction, 2 lanes: dst = (src1 + src2[0]) + src2[1].
// ins moves lane 1 of $src2 down to lane 0 of $tmp for the scalar faddd.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16754 
// Double mul-reduction, 2 lanes: dst = (src1 * src2[0]) * src2[1].
// ins moves lane 1 of $src2 down to lane 0 of $tmp for the scalar fmuld.
// Fix: the trailing format comment previously said "add reduction2d",
// copy-pasted from reduce_add2D; corrected to "mul reduction2d"
// (debug/disassembly output only — no change to emitted code).
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16774 
16775 // ====================VECTOR ARITHMETIC=======================================
16776 
16777 // --------------------------------- ADD --------------------------------------
16778 
16779 instruct vadd8B(vecD dst, vecD src1, vecD src2)
16780 %{
16781   predicate(n->as_Vector()->length() == 4 ||
16782             n->as_Vector()->length() == 8);
16783   match(Set dst (AddVB src1 src2));
16784   ins_cost(INSN_COST);
16785   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
16786   ins_encode %{
16787     __ addv(as_FloatRegister($dst$$reg), __ T8B,
16788             as_FloatRegister($src1$$reg),
16789             as_FloatRegister($src2$$reg));
16790   %}
16791   ins_pipe(vdop64);
16792 %}
16793 
16794 instruct vadd16B(vecX dst, vecX src1, vecX src2)
16795 %{
16796   predicate(n->as_Vector()->length() == 16);
16797   match(Set dst (AddVB src1 src2));
16798   ins_cost(INSN_COST);
16799   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
16800   ins_encode %{
16801     __ addv(as_FloatRegister($dst$$reg), __ T16B,
16802             as_FloatRegister($src1$$reg),
16803             as_FloatRegister($src2$$reg));
16804   %}
16805   ins_pipe(vdop128);
16806 %}
16807 
16808 instruct vadd4S(vecD dst, vecD src1, vecD src2)
16809 %{
16810   predicate(n->as_Vector()->length() == 2 ||
16811             n->as_Vector()->length() == 4);
16812   match(Set dst (AddVS src1 src2));
16813   ins_cost(INSN_COST);
16814   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
16815   ins_encode %{
16816     __ addv(as_FloatRegister($dst$$reg), __ T4H,
16817             as_FloatRegister($src1$$reg),
16818             as_FloatRegister($src2$$reg));
16819   %}
16820   ins_pipe(vdop64);
16821 %}
16822 
16823 instruct vadd8S(vecX dst, vecX src1, vecX src2)
16824 %{
16825   predicate(n->as_Vector()->length() == 8);
16826   match(Set dst (AddVS src1 src2));
16827   ins_cost(INSN_COST);
16828   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
16829   ins_encode %{
16830     __ addv(as_FloatRegister($dst$$reg), __ T8H,
16831             as_FloatRegister($src1$$reg),
16832             as_FloatRegister($src2$$reg));
16833   %}
16834   ins_pipe(vdop128);
16835 %}
16836 
16837 instruct vadd2I(vecD dst, vecD src1, vecD src2)
16838 %{
16839   predicate(n->as_Vector()->length() == 2);
16840   match(Set dst (AddVI src1 src2));
16841   ins_cost(INSN_COST);
16842   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
16843   ins_encode %{
16844     __ addv(as_FloatRegister($dst$$reg), __ T2S,
16845             as_FloatRegister($src1$$reg),
16846             as_FloatRegister($src2$$reg));
16847   %}
16848   ins_pipe(vdop64);
16849 %}
16850 
16851 instruct vadd4I(vecX dst, vecX src1, vecX src2)
16852 %{
16853   predicate(n->as_Vector()->length() == 4);
16854   match(Set dst (AddVI src1 src2));
16855   ins_cost(INSN_COST);
16856   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
16857   ins_encode %{
16858     __ addv(as_FloatRegister($dst$$reg), __ T4S,
16859             as_FloatRegister($src1$$reg),
16860             as_FloatRegister($src2$$reg));
16861   %}
16862   ins_pipe(vdop128);
16863 %}
16864 
16865 instruct vadd2L(vecX dst, vecX src1, vecX src2)
16866 %{
16867   predicate(n->as_Vector()->length() == 2);
16868   match(Set dst (AddVL src1 src2));
16869   ins_cost(INSN_COST);
16870   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
16871   ins_encode %{
16872     __ addv(as_FloatRegister($dst$$reg), __ T2D,
16873             as_FloatRegister($src1$$reg),
16874             as_FloatRegister($src2$$reg));
16875   %}
16876   ins_pipe(vdop128);
16877 %}
16878 
16879 instruct vadd2F(vecD dst, vecD src1, vecD src2)
16880 %{
16881   predicate(n->as_Vector()->length() == 2);
16882   match(Set dst (AddVF src1 src2));
16883   ins_cost(INSN_COST);
16884   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
16885   ins_encode %{
16886     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
16887             as_FloatRegister($src1$$reg),
16888             as_FloatRegister($src2$$reg));
16889   %}
16890   ins_pipe(vdop_fp64);
16891 %}
16892 
16893 instruct vadd4F(vecX dst, vecX src1, vecX src2)
16894 %{
16895   predicate(n->as_Vector()->length() == 4);
16896   match(Set dst (AddVF src1 src2));
16897   ins_cost(INSN_COST);
16898   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
16899   ins_encode %{
16900     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
16901             as_FloatRegister($src1$$reg),
16902             as_FloatRegister($src2$$reg));
16903   %}
16904   ins_pipe(vdop_fp128);
16905 %}
16906 
16907 instruct vadd2D(vecX dst, vecX src1, vecX src2)
16908 %{
16909   match(Set dst (AddVD src1 src2));
16910   ins_cost(INSN_COST);
16911   format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
16912   ins_encode %{
16913     __ fadd(as_FloatRegister($dst$$reg), __ T2D,
16914             as_FloatRegister($src1$$reg),
16915             as_FloatRegister($src2$$reg));
16916   %}
16917   ins_pipe(vdop_fp128);
16918 %}
16919 
16920 // --------------------------------- SUB --------------------------------------
16921 
16922 instruct vsub8B(vecD dst, vecD src1, vecD src2)
16923 %{
16924   predicate(n->as_Vector()->length() == 4 ||
16925             n->as_Vector()->length() == 8);
16926   match(Set dst (SubVB src1 src2));
16927   ins_cost(INSN_COST);
16928   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
16929   ins_encode %{
16930     __ subv(as_FloatRegister($dst$$reg), __ T8B,
16931             as_FloatRegister($src1$$reg),
16932             as_FloatRegister($src2$$reg));
16933   %}
16934   ins_pipe(vdop64);
16935 %}
16936 
16937 instruct vsub16B(vecX dst, vecX src1, vecX src2)
16938 %{
16939   predicate(n->as_Vector()->length() == 16);
16940   match(Set dst (SubVB src1 src2));
16941   ins_cost(INSN_COST);
16942   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
16943   ins_encode %{
16944     __ subv(as_FloatRegister($dst$$reg), __ T16B,
16945             as_FloatRegister($src1$$reg),
16946             as_FloatRegister($src2$$reg));
16947   %}
16948   ins_pipe(vdop128);
16949 %}
16950 
16951 instruct vsub4S(vecD dst, vecD src1, vecD src2)
16952 %{
16953   predicate(n->as_Vector()->length() == 2 ||
16954             n->as_Vector()->length() == 4);
16955   match(Set dst (SubVS src1 src2));
16956   ins_cost(INSN_COST);
16957   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
16958   ins_encode %{
16959     __ subv(as_FloatRegister($dst$$reg), __ T4H,
16960             as_FloatRegister($src1$$reg),
16961             as_FloatRegister($src2$$reg));
16962   %}
16963   ins_pipe(vdop64);
16964 %}
16965 
16966 instruct vsub8S(vecX dst, vecX src1, vecX src2)
16967 %{
16968   predicate(n->as_Vector()->length() == 8);
16969   match(Set dst (SubVS src1 src2));
16970   ins_cost(INSN_COST);
16971   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
16972   ins_encode %{
16973     __ subv(as_FloatRegister($dst$$reg), __ T8H,
16974             as_FloatRegister($src1$$reg),
16975             as_FloatRegister($src2$$reg));
16976   %}
16977   ins_pipe(vdop128);
16978 %}
16979 
16980 instruct vsub2I(vecD dst, vecD src1, vecD src2)
16981 %{
16982   predicate(n->as_Vector()->length() == 2);
16983   match(Set dst (SubVI src1 src2));
16984   ins_cost(INSN_COST);
16985   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
16986   ins_encode %{
16987     __ subv(as_FloatRegister($dst$$reg), __ T2S,
16988             as_FloatRegister($src1$$reg),
16989             as_FloatRegister($src2$$reg));
16990   %}
16991   ins_pipe(vdop64);
16992 %}
16993 
16994 instruct vsub4I(vecX dst, vecX src1, vecX src2)
16995 %{
16996   predicate(n->as_Vector()->length() == 4);
16997   match(Set dst (SubVI src1 src2));
16998   ins_cost(INSN_COST);
16999   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
17000   ins_encode %{
17001     __ subv(as_FloatRegister($dst$$reg), __ T4S,
17002             as_FloatRegister($src1$$reg),
17003             as_FloatRegister($src2$$reg));
17004   %}
17005   ins_pipe(vdop128);
17006 %}
17007 
17008 instruct vsub2L(vecX dst, vecX src1, vecX src2)
17009 %{
17010   predicate(n->as_Vector()->length() == 2);
17011   match(Set dst (SubVL src1 src2));
17012   ins_cost(INSN_COST);
17013   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
17014   ins_encode %{
17015     __ subv(as_FloatRegister($dst$$reg), __ T2D,
17016             as_FloatRegister($src1$$reg),
17017             as_FloatRegister($src2$$reg));
17018   %}
17019   ins_pipe(vdop128);
17020 %}
17021 
17022 instruct vsub2F(vecD dst, vecD src1, vecD src2)
17023 %{
17024   predicate(n->as_Vector()->length() == 2);
17025   match(Set dst (SubVF src1 src2));
17026   ins_cost(INSN_COST);
17027   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
17028   ins_encode %{
17029     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
17030             as_FloatRegister($src1$$reg),
17031             as_FloatRegister($src2$$reg));
17032   %}
17033   ins_pipe(vdop_fp64);
17034 %}
17035 
17036 instruct vsub4F(vecX dst, vecX src1, vecX src2)
17037 %{
17038   predicate(n->as_Vector()->length() == 4);
17039   match(Set dst (SubVF src1 src2));
17040   ins_cost(INSN_COST);
17041   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
17042   ins_encode %{
17043     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
17044             as_FloatRegister($src1$$reg),
17045             as_FloatRegister($src2$$reg));
17046   %}
17047   ins_pipe(vdop_fp128);
17048 %}
17049 
17050 instruct vsub2D(vecX dst, vecX src1, vecX src2)
17051 %{
17052   predicate(n->as_Vector()->length() == 2);
17053   match(Set dst (SubVD src1 src2));
17054   ins_cost(INSN_COST);
17055   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
17056   ins_encode %{
17057     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
17058             as_FloatRegister($src1$$reg),
17059             as_FloatRegister($src2$$reg));
17060   %}
17061   ins_pipe(vdop_fp128);
17062 %}
17063 
17064 // --------------------------------- MUL --------------------------------------
17065 
17066 instruct vmul4S(vecD dst, vecD src1, vecD src2)
17067 %{
17068   predicate(n->as_Vector()->length() == 2 ||
17069             n->as_Vector()->length() == 4);
17070   match(Set dst (MulVS src1 src2));
17071   ins_cost(INSN_COST);
17072   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
17073   ins_encode %{
17074     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
17075             as_FloatRegister($src1$$reg),
17076             as_FloatRegister($src2$$reg));
17077   %}
17078   ins_pipe(vmul64);
17079 %}
17080 
17081 instruct vmul8S(vecX dst, vecX src1, vecX src2)
17082 %{
17083   predicate(n->as_Vector()->length() == 8);
17084   match(Set dst (MulVS src1 src2));
17085   ins_cost(INSN_COST);
17086   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
17087   ins_encode %{
17088     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
17089             as_FloatRegister($src1$$reg),
17090             as_FloatRegister($src2$$reg));
17091   %}
17092   ins_pipe(vmul128);
17093 %}
17094 
17095 instruct vmul2I(vecD dst, vecD src1, vecD src2)
17096 %{
17097   predicate(n->as_Vector()->length() == 2);
17098   match(Set dst (MulVI src1 src2));
17099   ins_cost(INSN_COST);
17100   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
17101   ins_encode %{
17102     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
17103             as_FloatRegister($src1$$reg),
17104             as_FloatRegister($src2$$reg));
17105   %}
17106   ins_pipe(vmul64);
17107 %}
17108 
17109 instruct vmul4I(vecX dst, vecX src1, vecX src2)
17110 %{
17111   predicate(n->as_Vector()->length() == 4);
17112   match(Set dst (MulVI src1 src2));
17113   ins_cost(INSN_COST);
17114   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
17115   ins_encode %{
17116     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
17117             as_FloatRegister($src1$$reg),
17118             as_FloatRegister($src2$$reg));
17119   %}
17120   ins_pipe(vmul128);
17121 %}
17122 
17123 instruct vmul2F(vecD dst, vecD src1, vecD src2)
17124 %{
17125   predicate(n->as_Vector()->length() == 2);
17126   match(Set dst (MulVF src1 src2));
17127   ins_cost(INSN_COST);
17128   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
17129   ins_encode %{
17130     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
17131             as_FloatRegister($src1$$reg),
17132             as_FloatRegister($src2$$reg));
17133   %}
17134   ins_pipe(vmuldiv_fp64);
17135 %}
17136 
17137 instruct vmul4F(vecX dst, vecX src1, vecX src2)
17138 %{
17139   predicate(n->as_Vector()->length() == 4);
17140   match(Set dst (MulVF src1 src2));
17141   ins_cost(INSN_COST);
17142   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
17143   ins_encode %{
17144     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
17145             as_FloatRegister($src1$$reg),
17146             as_FloatRegister($src2$$reg));
17147   %}
17148   ins_pipe(vmuldiv_fp128);
17149 %}
17150 
17151 instruct vmul2D(vecX dst, vecX src1, vecX src2)
17152 %{
17153   predicate(n->as_Vector()->length() == 2);
17154   match(Set dst (MulVD src1 src2));
17155   ins_cost(INSN_COST);
17156   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
17157   ins_encode %{
17158     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
17159             as_FloatRegister($src1$$reg),
17160             as_FloatRegister($src2$$reg));
17161   %}
17162   ins_pipe(vmuldiv_fp128);
17163 %}
17164 
17165 // --------------------------------- MLA --------------------------------------
17166 
17167 instruct vmla4S(vecD dst, vecD src1, vecD src2)
17168 %{
17169   predicate(n->as_Vector()->length() == 2 ||
17170             n->as_Vector()->length() == 4);
17171   match(Set dst (AddVS dst (MulVS src1 src2)));
17172   ins_cost(INSN_COST);
17173   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
17174   ins_encode %{
17175     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
17176             as_FloatRegister($src1$$reg),
17177             as_FloatRegister($src2$$reg));
17178   %}
17179   ins_pipe(vmla64);
17180 %}
17181 
17182 instruct vmla8S(vecX dst, vecX src1, vecX src2)
17183 %{
17184   predicate(n->as_Vector()->length() == 8);
17185   match(Set dst (AddVS dst (MulVS src1 src2)));
17186   ins_cost(INSN_COST);
17187   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
17188   ins_encode %{
17189     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
17190             as_FloatRegister($src1$$reg),
17191             as_FloatRegister($src2$$reg));
17192   %}
17193   ins_pipe(vmla128);
17194 %}
17195 
17196 instruct vmla2I(vecD dst, vecD src1, vecD src2)
17197 %{
17198   predicate(n->as_Vector()->length() == 2);
17199   match(Set dst (AddVI dst (MulVI src1 src2)));
17200   ins_cost(INSN_COST);
17201   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
17202   ins_encode %{
17203     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
17204             as_FloatRegister($src1$$reg),
17205             as_FloatRegister($src2$$reg));
17206   %}
17207   ins_pipe(vmla64);
17208 %}
17209 
17210 instruct vmla4I(vecX dst, vecX src1, vecX src2)
17211 %{
17212   predicate(n->as_Vector()->length() == 4);
17213   match(Set dst (AddVI dst (MulVI src1 src2)));
17214   ins_cost(INSN_COST);
17215   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
17216   ins_encode %{
17217     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
17218             as_FloatRegister($src1$$reg),
17219             as_FloatRegister($src2$$reg));
17220   %}
17221   ins_pipe(vmla128);
17222 %}
17223 
17224 // dst + src1 * src2
17225 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
17226   predicate(UseFMA && n->as_Vector()->length() == 2);
17227   match(Set dst (FmaVF  dst (Binary src1 src2)));
17228   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
17229   ins_cost(INSN_COST);
17230   ins_encode %{
17231     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
17232             as_FloatRegister($src1$$reg),
17233             as_FloatRegister($src2$$reg));
17234   %}
17235   ins_pipe(vmuldiv_fp64);
17236 %}
17237 
17238 // dst + src1 * src2
17239 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
17240   predicate(UseFMA && n->as_Vector()->length() == 4);
17241   match(Set dst (FmaVF  dst (Binary src1 src2)));
17242   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
17243   ins_cost(INSN_COST);
17244   ins_encode %{
17245     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
17246             as_FloatRegister($src1$$reg),
17247             as_FloatRegister($src2$$reg));
17248   %}
17249   ins_pipe(vmuldiv_fp128);
17250 %}
17251 
17252 // dst + src1 * src2
17253 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
17254   predicate(UseFMA && n->as_Vector()->length() == 2);
17255   match(Set dst (FmaVD  dst (Binary src1 src2)));
17256   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
17257   ins_cost(INSN_COST);
17258   ins_encode %{
17259     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
17260             as_FloatRegister($src1$$reg),
17261             as_FloatRegister($src2$$reg));
17262   %}
17263   ins_pipe(vmuldiv_fp128);
17264 %}
17265 
17266 // --------------------------------- MLS --------------------------------------
17267 
17268 instruct vmls4S(vecD dst, vecD src1, vecD src2)
17269 %{
17270   predicate(n->as_Vector()->length() == 2 ||
17271             n->as_Vector()->length() == 4);
17272   match(Set dst (SubVS dst (MulVS src1 src2)));
17273   ins_cost(INSN_COST);
17274   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
17275   ins_encode %{
17276     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
17277             as_FloatRegister($src1$$reg),
17278             as_FloatRegister($src2$$reg));
17279   %}
17280   ins_pipe(vmla64);
17281 %}
17282 
17283 instruct vmls8S(vecX dst, vecX src1, vecX src2)
17284 %{
17285   predicate(n->as_Vector()->length() == 8);
17286   match(Set dst (SubVS dst (MulVS src1 src2)));
17287   ins_cost(INSN_COST);
17288   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
17289   ins_encode %{
17290     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
17291             as_FloatRegister($src1$$reg),
17292             as_FloatRegister($src2$$reg));
17293   %}
17294   ins_pipe(vmla128);
17295 %}
17296 
17297 instruct vmls2I(vecD dst, vecD src1, vecD src2)
17298 %{
17299   predicate(n->as_Vector()->length() == 2);
17300   match(Set dst (SubVI dst (MulVI src1 src2)));
17301   ins_cost(INSN_COST);
17302   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
17303   ins_encode %{
17304     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
17305             as_FloatRegister($src1$$reg),
17306             as_FloatRegister($src2$$reg));
17307   %}
17308   ins_pipe(vmla64);
17309 %}
17310 
17311 instruct vmls4I(vecX dst, vecX src1, vecX src2)
17312 %{
17313   predicate(n->as_Vector()->length() == 4);
17314   match(Set dst (SubVI dst (MulVI src1 src2)));
17315   ins_cost(INSN_COST);
17316   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
17317   ins_encode %{
17318     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
17319             as_FloatRegister($src1$$reg),
17320             as_FloatRegister($src2$$reg));
17321   %}
17322   ins_pipe(vmla128);
17323 %}
17324 
17325 // dst - src1 * src2
17326 instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
17327   predicate(UseFMA && n->as_Vector()->length() == 2);
17328   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17329   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17330   format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
17331   ins_cost(INSN_COST);
17332   ins_encode %{
17333     __ fmls(as_FloatRegister($dst$$reg), __ T2S,
17334             as_FloatRegister($src1$$reg),
17335             as_FloatRegister($src2$$reg));
17336   %}
17337   ins_pipe(vmuldiv_fp64);
17338 %}
17339 
17340 // dst - src1 * src2
17341 instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
17342   predicate(UseFMA && n->as_Vector()->length() == 4);
17343   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17344   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17345   format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
17346   ins_cost(INSN_COST);
17347   ins_encode %{
17348     __ fmls(as_FloatRegister($dst$$reg), __ T4S,
17349             as_FloatRegister($src1$$reg),
17350             as_FloatRegister($src2$$reg));
17351   %}
17352   ins_pipe(vmuldiv_fp128);
17353 %}
17354 
17355 // dst - src1 * src2
17356 instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
17357   predicate(UseFMA && n->as_Vector()->length() == 2);
17358   match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
17359   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
17360   format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
17361   ins_cost(INSN_COST);
17362   ins_encode %{
17363     __ fmls(as_FloatRegister($dst$$reg), __ T2D,
17364             as_FloatRegister($src1$$reg),
17365             as_FloatRegister($src2$$reg));
17366   %}
17367   ins_pipe(vmuldiv_fp128);
17368 %}
17369 
17370 // --------------------------------- DIV --------------------------------------
17371 
17372 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
17373 %{
17374   predicate(n->as_Vector()->length() == 2);
17375   match(Set dst (DivVF src1 src2));
17376   ins_cost(INSN_COST);
17377   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
17378   ins_encode %{
17379     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
17380             as_FloatRegister($src1$$reg),
17381             as_FloatRegister($src2$$reg));
17382   %}
17383   ins_pipe(vmuldiv_fp64);
17384 %}
17385 
17386 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
17387 %{
17388   predicate(n->as_Vector()->length() == 4);
17389   match(Set dst (DivVF src1 src2));
17390   ins_cost(INSN_COST);
17391   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
17392   ins_encode %{
17393     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
17394             as_FloatRegister($src1$$reg),
17395             as_FloatRegister($src2$$reg));
17396   %}
17397   ins_pipe(vmuldiv_fp128);
17398 %}
17399 
17400 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
17401 %{
17402   predicate(n->as_Vector()->length() == 2);
17403   match(Set dst (DivVD src1 src2));
17404   ins_cost(INSN_COST);
17405   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
17406   ins_encode %{
17407     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
17408             as_FloatRegister($src1$$reg),
17409             as_FloatRegister($src2$$reg));
17410   %}
17411   ins_pipe(vmuldiv_fp128);
17412 %}
17413 
17414 // --------------------------------- SQRT -------------------------------------
17415 
17416 instruct vsqrt2D(vecX dst, vecX src)
17417 %{
17418   predicate(n->as_Vector()->length() == 2);
17419   match(Set dst (SqrtVD src));
17420   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
17421   ins_encode %{
17422     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
17423              as_FloatRegister($src$$reg));
17424   %}
17425   ins_pipe(vsqrt_fp128);
17426 %}
17427 
17428 // --------------------------------- ABS --------------------------------------
17429 
17430 instruct vabs2F(vecD dst, vecD src)
17431 %{
17432   predicate(n->as_Vector()->length() == 2);
17433   match(Set dst (AbsVF src));
17434   ins_cost(INSN_COST * 3);
17435   format %{ "fabs  $dst,$src\t# vector (2S)" %}
17436   ins_encode %{
17437     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
17438             as_FloatRegister($src$$reg));
17439   %}
17440   ins_pipe(vunop_fp64);
17441 %}
17442 
17443 instruct vabs4F(vecX dst, vecX src)
17444 %{
17445   predicate(n->as_Vector()->length() == 4);
17446   match(Set dst (AbsVF src));
17447   ins_cost(INSN_COST * 3);
17448   format %{ "fabs  $dst,$src\t# vector (4S)" %}
17449   ins_encode %{
17450     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
17451             as_FloatRegister($src$$reg));
17452   %}
17453   ins_pipe(vunop_fp128);
17454 %}
17455 
17456 instruct vabs2D(vecX dst, vecX src)
17457 %{
17458   predicate(n->as_Vector()->length() == 2);
17459   match(Set dst (AbsVD src));
17460   ins_cost(INSN_COST * 3);
17461   format %{ "fabs  $dst,$src\t# vector (2D)" %}
17462   ins_encode %{
17463     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
17464             as_FloatRegister($src$$reg));
17465   %}
17466   ins_pipe(vunop_fp128);
17467 %}
17468 
17469 // --------------------------------- NEG --------------------------------------
17470 
17471 instruct vneg2F(vecD dst, vecD src)
17472 %{
17473   predicate(n->as_Vector()->length() == 2);
17474   match(Set dst (NegVF src));
17475   ins_cost(INSN_COST * 3);
17476   format %{ "fneg  $dst,$src\t# vector (2S)" %}
17477   ins_encode %{
17478     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
17479             as_FloatRegister($src$$reg));
17480   %}
17481   ins_pipe(vunop_fp64);
17482 %}
17483 
17484 instruct vneg4F(vecX dst, vecX src)
17485 %{
17486   predicate(n->as_Vector()->length() == 4);
17487   match(Set dst (NegVF src));
17488   ins_cost(INSN_COST * 3);
17489   format %{ "fneg  $dst,$src\t# vector (4S)" %}
17490   ins_encode %{
17491     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
17492             as_FloatRegister($src$$reg));
17493   %}
17494   ins_pipe(vunop_fp128);
17495 %}
17496 
17497 instruct vneg2D(vecX dst, vecX src)
17498 %{
17499   predicate(n->as_Vector()->length() == 2);
17500   match(Set dst (NegVD src));
17501   ins_cost(INSN_COST * 3);
17502   format %{ "fneg  $dst,$src\t# vector (2D)" %}
17503   ins_encode %{
17504     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
17505             as_FloatRegister($src$$reg));
17506   %}
17507   ins_pipe(vunop_fp128);
17508 %}
17509 
17510 // --------------------------------- AND --------------------------------------
17511 
17512 instruct vand8B(vecD dst, vecD src1, vecD src2)
17513 %{
17514   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17515             n->as_Vector()->length_in_bytes() == 8);
17516   match(Set dst (AndV src1 src2));
17517   ins_cost(INSN_COST);
17518   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
17519   ins_encode %{
17520     __ andr(as_FloatRegister($dst$$reg), __ T8B,
17521             as_FloatRegister($src1$$reg),
17522             as_FloatRegister($src2$$reg));
17523   %}
17524   ins_pipe(vlogical64);
17525 %}
17526 
17527 instruct vand16B(vecX dst, vecX src1, vecX src2)
17528 %{
17529   predicate(n->as_Vector()->length_in_bytes() == 16);
17530   match(Set dst (AndV src1 src2));
17531   ins_cost(INSN_COST);
17532   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
17533   ins_encode %{
17534     __ andr(as_FloatRegister($dst$$reg), __ T16B,
17535             as_FloatRegister($src1$$reg),
17536             as_FloatRegister($src2$$reg));
17537   %}
17538   ins_pipe(vlogical128);
17539 %}
17540 
17541 // --------------------------------- OR ---------------------------------------
17542 
// Bitwise OR of 4- or 8-byte vectors (vecD); 8B arrangement covers both.
// Fix: the format string previously said "and" — a copy-paste slip from
// vand8B. The emitted instruction is orr (and vor16B below already prints
// "orr"), so only debug/PrintOptoAssembly output was wrong; encoding is
// unchanged.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17557 
// Bitwise OR of 16-byte vectors (vecX): orr Vd.16B, Vn.16B, Vm.16B.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17571 
17572 // --------------------------------- XOR --------------------------------------
17573 
17574 instruct vxor8B(vecD dst, vecD src1, vecD src2)
17575 %{
17576   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17577             n->as_Vector()->length_in_bytes() == 8);
17578   match(Set dst (XorV src1 src2));
17579   ins_cost(INSN_COST);
17580   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
17581   ins_encode %{
17582     __ eor(as_FloatRegister($dst$$reg), __ T8B,
17583             as_FloatRegister($src1$$reg),
17584             as_FloatRegister($src2$$reg));
17585   %}
17586   ins_pipe(vlogical64);
17587 %}
17588 
17589 instruct vxor16B(vecX dst, vecX src1, vecX src2)
17590 %{
17591   predicate(n->as_Vector()->length_in_bytes() == 16);
17592   match(Set dst (XorV src1 src2));
17593   ins_cost(INSN_COST);
17594   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
17595   ins_encode %{
17596     __ eor(as_FloatRegister($dst$$reg), __ T16B,
17597             as_FloatRegister($src1$$reg),
17598             as_FloatRegister($src2$$reg));
17599   %}
17600   ins_pipe(vlogical128);
17601 %}
17602 
17603 // ------------------------------ Shift ---------------------------------------
17604 
17605 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
17606   match(Set dst (LShiftCntV cnt));
17607   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
17608   ins_encode %{
17609     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17610   %}
17611   ins_pipe(vdup_reg_reg128);
17612 %}
17613 
17614 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
17615 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
17616   match(Set dst (RShiftCntV cnt));
17617   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
17618   ins_encode %{
17619     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17620     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
17621   %}
17622   ins_pipe(vdup_reg_reg128);
17623 %}
17624 
// Byte-element shifts. Variable-count forms use sshl/ushl with the per-lane
// count vector built by vshiftcntL/vshiftcntR above (a negative lane count
// shifts right). Both LShiftVB and RShiftVB map onto sshl because the count
// vector's sign selects the direction.

// 4 or 8 bytes in a vecD.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16 bytes in a vecX.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift uses ushl with the negated count vector.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Immediate-count forms. The SIMD immediate shift encodings cannot express a
// count >= the element width, so out-of-range counts are handled explicitly:
// left/logical-right by >= 8 yields zero (eor dst,src,src zeroes the register);
// arithmetic-right clamps to 7, which replicates the sign bit.

instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift by >= width == shift by width-1 (sign replication).
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift by >= width == shift by width-1 (sign replication).
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17787 
// Short-element (16-bit) shifts; same scheme as the byte shifts above, with
// element width 16. One vecD rule covers 2- and 4-element short vectors.

instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift: ushl with negated count vector.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Immediate forms: counts >= 16 cannot be encoded, so zero the destination
// (left/logical-right) or clamp to 15 (arithmetic right), as for bytes.

instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift by >= element width: result is all zeroes (arrangement for eor
      // is irrelevant; T8B zeroes the whole 64-bit register).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift by >= width == shift by width-1 (sign replication).
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift by >= width == shift by width-1 (sign replication).
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17950 
17951 instruct vsll2I(vecD dst, vecD src, vecX shift) %{
17952   predicate(n->as_Vector()->length() == 2);
17953   match(Set dst (LShiftVI src shift));
17954   match(Set dst (RShiftVI src shift));
17955   ins_cost(INSN_COST);
17956   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
17957   ins_encode %{
17958     __ sshl(as_FloatRegister($dst$$reg), __ T2S,
17959             as_FloatRegister($src$$reg),
17960             as_FloatRegister($shift$$reg));
17961   %}
17962   ins_pipe(vshift64);
17963 %}
17964 
17965 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
17966   predicate(n->as_Vector()->length() == 4);
17967   match(Set dst (LShiftVI src shift));
17968   match(Set dst (RShiftVI src shift));
17969   ins_cost(INSN_COST);
17970   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
17971   ins_encode %{
17972     __ sshl(as_FloatRegister($dst$$reg), __ T4S,
17973             as_FloatRegister($src$$reg),
17974             as_FloatRegister($shift$$reg));
17975   %}
17976   ins_pipe(vshift128);
17977 %}
17978 
17979 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
17980   predicate(n->as_Vector()->length() == 2);
17981   match(Set dst (URShiftVI src shift));
17982   ins_cost(INSN_COST);
17983   format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
17984   ins_encode %{
17985     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
17986             as_FloatRegister($src$$reg),
17987             as_FloatRegister($shift$$reg));
17988   %}
17989   ins_pipe(vshift64);
17990 %}
17991 
17992 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
17993   predicate(n->as_Vector()->length() == 4);
17994   match(Set dst (URShiftVI src shift));
17995   ins_cost(INSN_COST);
17996   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
17997   ins_encode %{
17998     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
17999             as_FloatRegister($src$$reg),
18000             as_FloatRegister($shift$$reg));
18001   %}
18002   ins_pipe(vshift128);
18003 %}
18004 
18005 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
18006   predicate(n->as_Vector()->length() == 2);
18007   match(Set dst (LShiftVI src shift));
18008   ins_cost(INSN_COST);
18009   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
18010   ins_encode %{
18011     __ shl(as_FloatRegister($dst$$reg), __ T2S,
18012            as_FloatRegister($src$$reg),
18013            (int)$shift$$constant);
18014   %}
18015   ins_pipe(vshift64_imm);
18016 %}
18017 
18018 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
18019   predicate(n->as_Vector()->length() == 4);
18020   match(Set dst (LShiftVI src shift));
18021   ins_cost(INSN_COST);
18022   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
18023   ins_encode %{
18024     __ shl(as_FloatRegister($dst$$reg), __ T4S,
18025            as_FloatRegister($src$$reg),
18026            (int)$shift$$constant);
18027   %}
18028   ins_pipe(vshift128_imm);
18029 %}
18030 
// vsra2I_imm: vector arithmetic (signed) shift right by immediate,
// 2 x 32-bit int, 64-bit vector.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18043 
// vsra4I_imm: vector arithmetic (signed) shift right by immediate,
// 4 x 32-bit int, 128-bit vector.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18056 
// vsrl2I_imm: vector logical (unsigned) shift right by immediate,
// 2 x 32-bit int, 64-bit vector.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18069 
// vsrl4I_imm: vector logical (unsigned) shift right by immediate,
// 4 x 32-bit int, 128-bit vector.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18082 
// vsll2L: vector shift left (variable count), 2 x 64-bit long, 128-bit
// vector.
// NOTE(review): this rule also matches RShiftVL (arithmetic right shift).
// SSHL shifts each lane left by a signed per-lane amount, so it can only
// implement a right shift if the shift-count vector holds *negated*
// counts for the RShiftVL case — TODO confirm the count is negated by the
// shift-count lowering elsewhere in this file; if it is not, the second
// match is a correctness bug (right shifts would execute as left shifts).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18096 
// vsrl2L: vector logical shift right (variable count), 2 x 64-bit long,
// 128-bit vector.  USHL's per-lane count is signed; a right shift is a
// negative count — assumes the shift-count vector was negated by the
// shift-count lowering elsewhere in this file (TODO confirm).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18109 
// vsll2L_imm: vector shift left by immediate, 2 x 64-bit long, 128-bit
// vector.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18122 
// vsra2L_imm: vector arithmetic (signed) shift right by immediate,
// 2 x 64-bit long, 128-bit vector.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18135 
// vsrl2L_imm: vector logical (unsigned) shift right by immediate,
// 2 x 64-bit long, 128-bit vector.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18148 
18149 //----------PEEPHOLE RULES-----------------------------------------------------
18150 // These must follow all instruction definitions as they use the names
18151 // defined in the instructions definitions.
18152 //
18153 // peepmatch ( root_instr_name [preceding_instruction]* );
18154 //
18155 // peepconstraint %{
18156 // (instruction_number.operand_name relational_op instruction_number.operand_name
18157 //  [, ...] );
18158 // // instruction numbers are zero-based using left to right order in peepmatch
18159 //
18160 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18161 // // provide an instruction_number.operand_name for each operand that appears
18162 // // in the replacement instruction's match rule
18163 //
18164 // ---------VM FLAGS---------------------------------------------------------
18165 //
18166 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18167 //
18168 // Each peephole rule is given an identifying number starting with zero and
18169 // increasing by one in the order seen by the parser.  An individual peephole
18170 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18171 // on the command-line.
18172 //
18173 // ---------CURRENT LIMITATIONS----------------------------------------------
18174 //
18175 // Only match adjacent instructions in same basic block
18176 // Only equality constraints
18177 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18178 // Only one replacement instruction
18179 //
18180 // ---------EXAMPLE----------------------------------------------------------
18181 //
18182 // // pertinent parts of existing instructions in architecture description
18183 // instruct movI(iRegINoSp dst, iRegI src)
18184 // %{
18185 //   match(Set dst (CopyI src));
18186 // %}
18187 //
18188 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18189 // %{
18190 //   match(Set dst (AddI dst src));
18191 //   effect(KILL cr);
18192 // %}
18193 //
18194 // // Change (inc mov) to lea
18195 // peephole %{
//   // increment preceded by register-register move
18197 //   peepmatch ( incI_iReg movI );
18198 //   // require that the destination register of the increment
18199 //   // match the destination register of the move
18200 //   peepconstraint ( 0.dst == 1.dst );
18201 //   // construct a replacement instruction that sets
18202 //   // the destination to ( move's source register + one )
18203 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18204 // %}
18205 //
18206 
18207 // Implementation no longer uses movX instructions since
18208 // machine-independent system no longer uses CopyX nodes.
18209 //
18210 // peephole
18211 // %{
18212 //   peepmatch (incI_iReg movI);
18213 //   peepconstraint (0.dst == 1.dst);
18214 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18215 // %}
18216 
18217 // peephole
18218 // %{
18219 //   peepmatch (decI_iReg movI);
18220 //   peepconstraint (0.dst == 1.dst);
18221 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18222 // %}
18223 
18224 // peephole
18225 // %{
18226 //   peepmatch (addI_iReg_imm movI);
18227 //   peepconstraint (0.dst == 1.dst);
18228 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18229 // %}
18230 
18231 // peephole
18232 // %{
18233 //   peepmatch (incL_iReg movL);
18234 //   peepconstraint (0.dst == 1.dst);
18235 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18236 // %}
18237 
18238 // peephole
18239 // %{
18240 //   peepmatch (decL_iReg movL);
18241 //   peepconstraint (0.dst == 1.dst);
18242 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18243 // %}
18244 
18245 // peephole
18246 // %{
18247 //   peepmatch (addL_iReg_imm movL);
18248 //   peepconstraint (0.dst == 1.dst);
18249 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18250 // %}
18251 
18252 // peephole
18253 // %{
18254 //   peepmatch (addP_iReg_imm movP);
18255 //   peepconstraint (0.dst == 1.dst);
18256 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18257 // %}
18258 
18259 // // Change load of spilled value to only a spill
18260 // instruct storeI(memory mem, iRegI src)
18261 // %{
18262 //   match(Set mem (StoreI mem src));
18263 // %}
18264 //
18265 // instruct loadI(iRegINoSp dst, memory mem)
18266 // %{
18267 //   match(Set dst (LoadI mem));
18268 // %}
18269 //
18270 
18271 //----------SMARTSPILL RULES---------------------------------------------------
18272 // These must follow all instruction definitions as they use the names
18273 // defined in the instructions definitions.
18274 
18275 // Local Variables:
18276 // mode: c++
18277 // End: