1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
// General Registers

// Each 64-bit integer register is described as two 32-bit halves: the
// real low half and a virtual high half used only by the register
// allocator (see the note on 64 bit int registers above).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined: they are kept invisible to
// the register allocator so they can be used as scratch registers
// (see note above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        ); // rmethod
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are SOE for the C calling convention but SOC for Java (see
// the note above on why Java uses no callee-save registers).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are system registers: no-save for Java.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each 128-bit vector register is described as four 32-bit slots
  // (V, V_H, V_J, V_K); scalar float/double use only the first
  // element(s) -- see the note above.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee save under the platform ABI but SOC for Java
  // (see note above).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16-v31 are SOC under the platform ABI as well.
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Condition flags: ideal type 0 (none), encoding 32 (just past the 32
// GPR encodings) and no backing VMReg (VMRegImpl::Bad()).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer registers. Order within the chunk expresses the allocator's
// selection priority (see the comment above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

// Floating point / vector registers.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// Condition flags.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Class for all long integer registers (including SP == r31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers. This is the variant
// with the frame pointer (r29) excluded; it is selected by the
// no_special_reg32 dynamic class below when PreserveFramePointer is
// set.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580  /* R29, */                     // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
// Dynamic class: uses the no_fp variant (r29 excluded) when
// PreserveFramePointer is set, the with_fp variant otherwise.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers. This is the
// variant with the frame pointer (r29/r29_h) excluded; it is selected
// by the no_special_reg dynamic class below when PreserveFramePointer
// is set.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649  /* R29, R29_H, */              // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
// Dynamic class: uses the no_fp variant (r29 excluded) when
// PreserveFramePointer is set, the with_fp variant otherwise.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (rmethod == r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers, including the special registers
// (heapbase, thread, fp, lr, sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers (excludes heapbase,
// thread, fp, lr and sp)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers -- a single 32-bit slot of each
// vector register (only the first element is used for scalar floats,
// see note above).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers -- two 32-bit slots (V, V_H) per
// vector register.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers -- the low 64 bits (two 32-bit
// slots V, V_H) of each vector register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
 904 // Class for all 128bit vector registers
 905 reg_class vectorx_reg(
 906     V0, V0_H, V0_J, V0_K,
 907     V1, V1_H, V1_J, V1_K,
 908     V2, V2_H, V2_J, V2_K,
 909     V3, V3_H, V3_J, V3_K,
 910     V4, V4_H, V4_J, V4_K,
 911     V5, V5_H, V5_J, V5_K,
 912     V6, V6_H, V6_J, V6_K,
 913     V7, V7_H, V7_J, V7_K,
 914     V8, V8_H, V8_J, V8_K,
 915     V9, V9_H, V9_J, V9_K,
 916     V10, V10_H, V10_J, V10_K,
 917     V11, V11_H, V11_J, V11_K,
 918     V12, V12_H, V12_J, V12_K,
 919     V13, V13_H, V13_J, V13_K,
 920     V14, V14_H, V14_J, V14_K,
 921     V15, V15_H, V15_J, V15_K,
 922     V16, V16_H, V16_J, V16_K,
 923     V17, V17_H, V17_J, V17_K,
 924     V18, V18_H, V18_J, V18_K,
 925     V19, V19_H, V19_J, V19_K,
 926     V20, V20_H, V20_J, V20_K,
 927     V21, V21_H, V21_J, V21_K,
 928     V22, V22_H, V22_J, V22_K,
 929     V23, V23_H, V23_J, V23_K,
 930     V24, V24_H, V24_J, V24_K,
 931     V25, V25_H, V25_J, V25_K,
 932     V26, V26_H, V26_J, V26_K,
 933     V27, V27_H, V27_J, V27_K,
 934     V28, V28_H, V28_J, V28_K,
 935     V29, V29_H, V29_J, V29_K,
 936     V30, V30_H, V30_J, V30_K,
 937     V31, V31_H, V31_J, V31_K
 938 );
 939 
// Singleton classes pinning an operand to one specific vector register
// (v0..v3), used where an instruction requires a fixed register.
//
// NOTE(review): each class names only the base + _H slice pair (the
// 64-bit layout used by double_reg), yet the comments say "128 bit".
// Confirm whether the _J/_K slices are intentionally omitted here.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for the condition codes (flags) register; integer
// compares/branches allocate from this one-element class.
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches are ranked as twice the cost of a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed the same as branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are the most expensive operations.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 #include "opto/addnode.hpp"
1000 
1001 class CallStubImpl {
1002 
1003   //--------------------------------------------------------------
1004   //---<  Used for optimization in Compile::shorten_branches  >---
1005   //--------------------------------------------------------------
1006 
1007  public:
1008   // Size of call trampoline stub.
1009   static uint size_call_trampoline() {
1010     return 0; // no call trampolines on this platform
1011   }
1012 
1013   // number of relocations needed by a call trampoline stub
1014   static uint reloc_call_trampoline() {
1015     return 0; // no call trampolines on this platform
1016   }
1017 };
1018 
// Sizing and emission hooks for the exception and deoptimization
// handler stubs (emit_* bodies are defined in the generated ad file).
class HandlerImpl {

 public:

  // Emit the handler code into cbuf; each returns the offset of the
  // emitted handler within the code buffer.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size of the exception handler: a single far branch
  // (size supplied by MacroAssembler::far_branch_size()).
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 words total — presumably one adr plus up to
    // three instructions for the far branch; confirm this matches
    // MacroAssembler::far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1035 
  // graph traversal helpers

  // walk across the Ctl+Mem Proj pair that links a node to its parent
  // or child membar; each returns NULL when no such membar exists
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  // true if barrier can head a volatile put/CAS node sequence
  bool leading_membar(const MemBarNode *barrier);

  // true if barrier is the MemBarVolatile of a GC card mark sequence
  bool is_card_mark_membar(const MemBarNode *barrier);
  // true if opcode is a CompareAndSwap* ideal opcode (presumably;
  // definition not in this chunk)
  bool is_CAS(int opcode);

  // pair up the leading and trailing membars of a recognised volatile
  // put/CAS subgraph, searching down or up respectively
  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1068 %}
1069 
1070 source %{
1071 
1072   // Optimizaton of volatile gets and puts
1073   // -------------------------------------
1074   //
1075   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1076   // use to implement volatile reads and writes. For a volatile read
1077   // we simply need
1078   //
1079   //   ldar<x>
1080   //
1081   // and for a volatile write we need
1082   //
1083   //   stlr<x>
1084   //
1085   // Alternatively, we can implement them by pairing a normal
1086   // load/store with a memory barrier. For a volatile read we need
1087   //
1088   //   ldr<x>
1089   //   dmb ishld
1090   //
1091   // for a volatile write
1092   //
1093   //   dmb ish
1094   //   str<x>
1095   //   dmb ish
1096   //
1097   // We can also use ldaxr and stlxr to implement compare and swap CAS
1098   // sequences. These are normally translated to an instruction
1099   // sequence like the following
1100   //
1101   //   dmb      ish
1102   // retry:
1103   //   ldxr<x>   rval raddr
1104   //   cmp       rval rold
1105   //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
1107   //   cbnz      rval retry
1108   // done:
1109   //   cset      r0, eq
1110   //   dmb ishld
1111   //
1112   // Note that the exclusive store is already using an stlxr
1113   // instruction. That is required to ensure visibility to other
1114   // threads of the exclusive write (assuming it succeeds) before that
1115   // of any subsequent writes.
1116   //
1117   // The following instruction sequence is an improvement on the above
1118   //
1119   // retry:
1120   //   ldaxr<x>  rval raddr
1121   //   cmp       rval rold
1122   //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
1124   //   cbnz      rval retry
1125   // done:
1126   //   cset      r0, eq
1127   //
1128   // We don't need the leading dmb ish since the stlxr guarantees
1129   // visibility of prior writes in the case that the swap is
1130   // successful. Crucially we don't have to worry about the case where
1131   // the swap is not successful since no valid program should be
1132   // relying on visibility of prior changes by the attempting thread
1133   // in the case where the CAS fails.
1134   //
1135   // Similarly, we don't need the trailing dmb ishld if we substitute
1136   // an ldaxr instruction since that will provide all the guarantees we
1137   // require regarding observation of changes made by other threads
1138   // before any change to the CAS address observed by the load.
1139   //
1140   // In order to generate the desired instruction sequence we need to
1141   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1143   // writes or CAS operations and ii) do not occur through any other
1144   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1146   // sequences to the desired machine code sequences. Selection of the
1147   // alternative rules can be implemented by predicates which identify
1148   // the relevant node sequences.
1149   //
1150   // The ideal graph generator translates a volatile read to the node
1151   // sequence
1152   //
1153   //   LoadX[mo_acquire]
1154   //   MemBarAcquire
1155   //
1156   // As a special case when using the compressed oops optimization we
1157   // may also see this variant
1158   //
1159   //   LoadN[mo_acquire]
1160   //   DecodeN
1161   //   MemBarAcquire
1162   //
1163   // A volatile write is translated to the node sequence
1164   //
1165   //   MemBarRelease
1166   //   StoreX[mo_release] {CardMark}-optional
1167   //   MemBarVolatile
1168   //
1169   // n.b. the above node patterns are generated with a strict
1170   // 'signature' configuration of input and output dependencies (see
1171   // the predicates below for exact details). The card mark may be as
1172   // simple as a few extra nodes or, in a few GC configurations, may
1173   // include more complex control flow between the leading and
1174   // trailing memory barriers. However, whatever the card mark
1175   // configuration these signatures are unique to translated volatile
1176   // reads/stores -- they will not appear as a result of any other
1177   // bytecode translation or inlining nor as a consequence of
1178   // optimizing transforms.
1179   //
1180   // We also want to catch inlined unsafe volatile gets and puts and
1181   // be able to implement them using either ldar<x>/stlr<x> or some
1182   // combination of ldr<x>/stlr<x> and dmb instructions.
1183   //
1184   // Inlined unsafe volatiles puts manifest as a minor variant of the
1185   // normal volatile put node sequence containing an extra cpuorder
1186   // membar
1187   //
1188   //   MemBarRelease
1189   //   MemBarCPUOrder
1190   //   StoreX[mo_release] {CardMark}-optional
1191   //   MemBarVolatile
1192   //
1193   // n.b. as an aside, the cpuorder membar is not itself subject to
1194   // matching and translation by adlc rules.  However, the rule
1195   // predicates need to detect its presence in order to correctly
1196   // select the desired adlc rules.
1197   //
1198   // Inlined unsafe volatile gets manifest as a somewhat different
1199   // node sequence to a normal volatile get
1200   //
1201   //   MemBarCPUOrder
1202   //        ||       \\
1203   //   MemBarAcquire LoadX[mo_acquire]
1204   //        ||
1205   //   MemBarCPUOrder
1206   //
1207   // In this case the acquire membar does not directly depend on the
1208   // load. However, we can be sure that the load is generated from an
1209   // inlined unsafe volatile get if we see it dependent on this unique
1210   // sequence of membar nodes. Similarly, given an acquire membar we
1211   // can know that it was added because of an inlined unsafe volatile
1212   // get if it is fed and feeds a cpuorder membar and if its feed
1213   // membar also feeds an acquiring load.
1214   //
1215   // Finally an inlined (Unsafe) CAS operation is translated to the
1216   // following ideal graph
1217   //
1218   //   MemBarRelease
1219   //   MemBarCPUOrder
1220   //   CompareAndSwapX {CardMark}-optional
1221   //   MemBarCPUOrder
1222   //   MemBarAcquire
1223   //
1224   // So, where we can identify these volatile read and write
1225   // signatures we can choose to plant either of the above two code
1226   // sequences. For a volatile read we can simply plant a normal
1227   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1228   // also choose to inhibit translation of the MemBarAcquire and
1229   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1230   //
1231   // When we recognise a volatile store signature we can choose to
1232   // plant at a dmb ish as a translation for the MemBarRelease, a
1233   // normal str<x> and then a dmb ish for the MemBarVolatile.
1234   // Alternatively, we can inhibit translation of the MemBarRelease
1235   // and MemBarVolatile and instead plant a simple stlr<x>
1236   // instruction.
1237   //
1238   // when we recognise a CAS signature we can choose to plant a dmb
1239   // ish as a translation for the MemBarRelease, the conventional
1240   // macro-instruction sequence for the CompareAndSwap node (which
1241   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1242   // Alternatively, we can elide generation of the dmb instructions
1243   // and plant the alternative CompareAndSwap macro-instruction
1244   // sequence (which uses ldaxr<x>).
1245   //
1246   // Of course, the above only applies when we see these signature
1247   // configurations. We still want to plant dmb instructions in any
1248   // other cases where we may see a MemBarAcquire, MemBarRelease or
1249   // MemBarVolatile. For example, at the end of a constructor which
1250   // writes final/volatile fields we will see a MemBarRelease
1251   // instruction and this needs a 'dmb ish' lest we risk the
1252   // constructed object being visible without making the
1253   // final/volatile field writes visible.
1254   //
1255   // n.b. the translation rules below which rely on detection of the
1256   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1257   // If we see anything other than the signature configurations we
1258   // always just translate the loads and stores to ldr<x> and str<x>
1259   // and translate acquire, release and volatile membars to the
1260   // relevant dmb instructions.
1261   //
1262 
1263   // graph traversal helpers used for volatile put/get and CAS
1264   // optimization
1265 
1266   // 1) general purpose helpers
1267 
1268   // if node n is linked to a parent MemBarNode by an intervening
1269   // Control and Memory ProjNode return the MemBarNode otherwise return
1270   // NULL.
1271   //
1272   // n may only be a Load or a MemBar.
1273 
1274   MemBarNode *parent_membar(const Node *n)
1275   {
1276     Node *ctl = NULL;
1277     Node *mem = NULL;
1278     Node *membar = NULL;
1279 
1280     if (n->is_Load()) {
1281       ctl = n->lookup(LoadNode::Control);
1282       mem = n->lookup(LoadNode::Memory);
1283     } else if (n->is_MemBar()) {
1284       ctl = n->lookup(TypeFunc::Control);
1285       mem = n->lookup(TypeFunc::Memory);
1286     } else {
1287         return NULL;
1288     }
1289 
1290     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1291       return NULL;
1292     }
1293 
1294     membar = ctl->lookup(0);
1295 
1296     if (!membar || !membar->is_MemBar()) {
1297       return NULL;
1298     }
1299 
1300     if (mem->lookup(0) != membar) {
1301       return NULL;
1302     }
1303 
1304     return membar->as_MemBar();
1305   }
1306 
1307   // if n is linked to a child MemBarNode by intervening Control and
1308   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1309 
1310   MemBarNode *child_membar(const MemBarNode *n)
1311   {
1312     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1313     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1314 
1315     // MemBar needs to have both a Ctl and Mem projection
1316     if (! ctl || ! mem)
1317       return NULL;
1318 
1319     MemBarNode *child = NULL;
1320     Node *x;
1321 
1322     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1323       x = ctl->fast_out(i);
1324       // if we see a membar we keep hold of it. we may also see a new
1325       // arena copy of the original but it will appear later
1326       if (x->is_MemBar()) {
1327           child = x->as_MemBar();
1328           break;
1329       }
1330     }
1331 
1332     if (child == NULL) {
1333       return NULL;
1334     }
1335 
1336     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1337       x = mem->fast_out(i);
1338       // if we see a membar we keep hold of it. we may also see a new
1339       // arena copy of the original but it will appear later
1340       if (x == child) {
1341         return child;
1342       }
1343     }
1344     return NULL;
1345   }
1346 
1347   // helper predicate use to filter candidates for a leading memory
1348   // barrier
1349   //
1350   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1351   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1352 
1353   bool leading_membar(const MemBarNode *barrier)
1354   {
1355     int opcode = barrier->Opcode();
1356     // if this is a release membar we are ok
1357     if (opcode == Op_MemBarRelease) {
1358       return true;
1359     }
1360     // if its a cpuorder membar . . .
1361     if (opcode != Op_MemBarCPUOrder) {
1362       return false;
1363     }
1364     // then the parent has to be a release membar
1365     MemBarNode *parent = parent_membar(barrier);
1366     if (!parent) {
1367       return false;
1368     }
1369     opcode = parent->Opcode();
1370     return opcode == Op_MemBarRelease;
1371   }
1372 
1373   // 2) card mark detection helper
1374 
1375   // helper predicate which can be used to detect a volatile membar
1376   // introduced as part of a conditional card mark sequence either by
1377   // G1 or by CMS when UseCondCardMark is true.
1378   //
1379   // membar can be definitively determined to be part of a card mark
1380   // sequence if and only if all the following hold
1381   //
1382   // i) it is a MemBarVolatile
1383   //
1384   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1385   // true
1386   //
1387   // iii) the node's Mem projection feeds a StoreCM node.
1388 
1389   bool is_card_mark_membar(const MemBarNode *barrier)
1390   {
1391     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1392       return false;
1393     }
1394 
1395     if (barrier->Opcode() != Op_MemBarVolatile) {
1396       return false;
1397     }
1398 
1399     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1400 
1401     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1402       Node *y = mem->fast_out(i);
1403       if (y->Opcode() == Op_StoreCM) {
1404         return true;
1405       }
1406     }
1407 
1408     return false;
1409   }
1410 
1411 
1412   // 3) helper predicates to traverse volatile put or CAS graphs which
1413   // may contain GC barrier subgraphs
1414 
1415   // Preamble
1416   // --------
1417   //
1418   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1420   // leading MemBarRelease and a trailing MemBarVolatile as follows
1421   //
1422   //   MemBarRelease
1423   //  {    ||        } -- optional
1424   //  {MemBarCPUOrder}
1425   //       ||       \\
1426   //       ||     StoreX[mo_release]
1427   //       | \ Bot    / ???
1428   //       | MergeMem
1429   //       | /
1430   //   MemBarVolatile
1431   //
1432   // where
1433   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1434   //  | \ and / indicate further routing of the Ctl and Mem feeds
1435   //
1436   // Note that the memory feed from the CPUOrder membar to the
1437   // MergeMem node is an AliasIdxBot slice while the feed from the
1438   // StoreX is for a slice determined by the type of value being
1439   // written.
1440   //
1441   // the diagram above shows the graph we see for non-object stores.
1442   // for a volatile Object store (StoreN/P) we may see other nodes
1443   // below the leading membar because of the need for a GC pre- or
1444   // post-write barrier.
1445   //
  // with most GC configurations we will see this simple variant which
1447   // includes a post-write barrier card mark.
1448   //
1449   //   MemBarRelease______________________________
1450   //         ||    \\               Ctl \        \\
1451   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1452   //         | \ Bot  / oop                 . . .  /
1453   //         | MergeMem
1454   //         | /
1455   //         ||      /
1456   //   MemBarVolatile
1457   //
1458   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1459   // the object address to an int used to compute the card offset) and
1460   // Ctl+Mem to a StoreB node (which does the actual card mark).
1461   //
1462   // n.b. a StoreCM node is only ever used when CMS (with or without
1463   // CondCardMark) or G1 is configured. This abstract instruction
1464   // differs from a normal card mark write (StoreB) because it implies
1465   // a requirement to order visibility of the card mark (StoreCM)
1466   // after that of the object put (StoreP/N) using a StoreStore memory
1467   // barrier. Note that this is /not/ a requirement to order the
1468   // instructions in the generated code (that is already guaranteed by
1469   // the order of memory dependencies). Rather it is a requirement to
1470   // ensure visibility order which only applies on architectures like
1471   // AArch64 which do not implement TSO. This ordering is required for
1472   // both non-volatile and volatile puts.
1473   //
1474   // That implies that we need to translate a StoreCM using the
1475   // sequence
1476   //
1477   //   dmb ishst
1478   //   stlrb
1479   //
1480   // This dmb cannot be omitted even when the associated StoreX or
1481   // CompareAndSwapX is implemented using stlr. However, as described
1482   // below there are circumstances where a specific GC configuration
1483   // requires a stronger barrier in which case it can be omitted.
1484   // 
1485   // With the Serial or Parallel GC using +CondCardMark the card mark
1486   // is performed conditionally on it currently being unmarked in
1487   // which case the volatile put graph looks slightly different
1488   //
1489   //   MemBarRelease____________________________________________
1490   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1491   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1492   //         | \ Bot / oop                          \            |
1493   //         | MergeMem                            . . .      StoreB
1494   //         | /                                                /
1495   //         ||     /
1496   //   MemBarVolatile
1497   //
1498   // It is worth noting at this stage that all the above
1499   // configurations can be uniquely identified by checking that the
1500   // memory flow includes the following subgraph:
1501   //
1502   //   MemBarRelease
1503   //  {MemBarCPUOrder}
1504   //      |  \      . . .
1505   //      |  StoreX[mo_release]  . . .
1506   //  Bot |   / oop
1507   //     MergeMem
1508   //      |
1509   //   MemBarVolatile
1510   //
1511   // This is referred to as a *normal* volatile store subgraph. It can
1512   // easily be detected starting from any candidate MemBarRelease,
1513   // StoreX[mo_release] or MemBarVolatile node.
1514   //
1515   // A small variation on this normal case occurs for an unsafe CAS
1516   // operation. The basic memory flow subgraph for a non-object CAS is
1517   // as follows
1518   //
1519   //   MemBarRelease
1520   //         ||
1521   //   MemBarCPUOrder
1522   //          |     \\   . . .
1523   //          |     CompareAndSwapX
1524   //          |       |
1525   //      Bot |     SCMemProj
1526   //           \     / Bot
1527   //           MergeMem
1528   //           /
1529   //   MemBarCPUOrder
1530   //         ||
1531   //   MemBarAcquire
1532   //
1533   // The same basic variations on this arrangement (mutatis mutandis)
1534   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1535   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1536   // flow subgraph is still present.
1537   // 
1538   // This is referred to as a *normal* CAS subgraph. It can easily be
1539   // detected starting from any candidate MemBarRelease,
1540   // StoreX[mo_release] or MemBarAcquire node.
1541   //
1542   // The code below uses two helper predicates, leading_to_trailing
1543   // and trailing_to_leading to identify these normal graphs, one
1544   // validating the layout starting from the top membar and searching
1545   // down and the other validating the layout starting from the lower
1546   // membar and searching up.
1547   //
1548   // There are two special case GC configurations when the simple
1549   // normal graphs above may not be generated: when using G1 (which
1550   // always employs a conditional card mark); and when using CMS with
1551   // conditional card marking (+CondCardMark) configured. These GCs
1552   // are both concurrent rather than stop-the world GCs. So they
1553   // introduce extra Ctl+Mem flow into the graph between the leading
1554   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1556   // conditional card mark. CMS employs a post-write GC barrier while
1557   // G1 employs both a pre- and post-write GC barrier.
1558   //
1559   // The post-write barrier subgraph for these configurations includes
1560   // a MemBarVolatile node -- referred to as a card mark membar --
1561   // which is needed to order the card write (StoreCM) operation in
1562   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1563   // operations performed by GC threads i.e. a card mark membar
1564   // constitutes a StoreLoad barrier hence must be translated to a dmb
1565   // ish (whether or not it sits inside a volatile store sequence).
1566   //
1567   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1569   // instruction. The necessary visibility ordering will already be
1570   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
1572   // configuration +CMS -CondCardMark.
1573   // 
1574   // Of course all these extra barrier nodes may well be absent --
1575   // they are only inserted for object puts. Their potential presence
1576   // significantly complicates the task of identifying whether a
1577   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1578   // MemBarAcquire forms part of a volatile put or CAS when using
1579   // these GC configurations (see below) and also complicates the
1580   // decision as to how to translate a MemBarVolatile and StoreCM.
1581   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1584   // trailing MemBarVolatile. Resolving this is straightforward: a
1585   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1586   // node and that is a unique marker
1587   //
1588   //      MemBarVolatile (card mark)
1589   //       C |    \     . . .
1590   //         |   StoreCM   . . .
1591   //       . . .
1592   //
1593   // Returning to the task of translating the object put and the
1594   // leading/trailing membar nodes: what do the node graphs look like
1595   // for these 2 special cases? and how can we determine the status of
1596   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1597   // normal and non-normal cases?
1598   //
1599   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1601   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1602   // intervening StoreLoad barrier (MemBarVolatile).
1603   //
1604   // So, with CMS we may see a node graph for a volatile object store
1605   // which looks like this
1606   //
1607   //   MemBarRelease
1608   //   MemBarCPUOrder_(leading)____________________
1609   //     C |  | M \       \\               M |   C \
1610   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1611   //       |  | Bot \    / oop      \        |
1612   //       |  |    MergeMem          \      / 
1613   //       |  |      /                |    /
1614   //     MemBarVolatile (card mark)   |   /
1615   //     C |  ||    M |               |  /
1616   //       | LoadB    | Bot       oop | / Bot
1617   //       |   |      |              / /
1618   //       | Cmp      |\            / /
1619   //       | /        | \          / /
1620   //       If         |  \        / /
1621   //       | \        |   \      / /
1622   // IfFalse  IfTrue  |    \    / /
1623   //       \     / \  |    |   / /
1624   //        \   / StoreCM  |  / /
1625   //         \ /      \   /  / /
1626   //        Region     Phi  / /
1627   //          | \   Raw |  / /
1628   //          |  . . .  | / /
1629   //          |       MergeMem
1630   //          |           |
1631   //        MemBarVolatile (trailing)
1632   //
1633   // Notice that there are two MergeMem nodes below the leading
1634   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1635   // the leading membar and the oopptr Mem slice from the Store into
1636   // the card mark membar. The trailing MergeMem merges the
1637   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1638   // slice from the StoreCM and an oop slice from the StoreN/P node
1639   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1640   // associated with the If region).
1641   //
1642   // So, in the case of CMS + CondCardMark the volatile object store
1643   // graph still includes a normal volatile store subgraph from the
1644   // leading membar to the trailing membar. However, it also contains
1645   // the same shape memory flow to the card mark membar. The two flows
1646   // can be distinguished by testing whether or not the downstream
1647   // membar is a card mark membar.
1648   //
1649   // The graph for a CAS also varies with CMS + CondCardMark, in
1650   // particular employing a control feed from the CompareAndSwapX node
1651   // through a CmpI and If to the card mark membar and StoreCM which
1652   // updates the associated card. This avoids executing the card mark
1653   // if the CAS fails. However, it can be seen from the diagram below
1654   // that the presence of the barrier does not alter the normal CAS
1655   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1656   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1657   // MemBarAcquire pair.
1658   //
1659   //   MemBarRelease
1660   //   MemBarCPUOrder__(leading)_______________________
1661   //   C /  M |                        \\            C \
1662   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1663   //          |                  C /  M |
1664   //          |                 CmpI    |
1665   //          |                  /      |
1666   //          |               . . .     |
1667   //          |              IfTrue     |
1668   //          |              /          |
1669   //       MemBarVolatile (card mark)   |
1670   //        C |  ||    M |              |
1671   //          | LoadB    | Bot   ______/|
1672   //          |   |      |      /       |
1673   //          | Cmp      |     /      SCMemProj
1674   //          | /        |    /         |
1675   //          If         |   /         /
1676   //          | \        |  /         / Bot
1677   //     IfFalse  IfTrue | /         /
1678   //          |   / \   / / prec    /
1679   //   . . .  |  /  StoreCM        /
1680   //        \ | /      | raw      /
1681   //        Region    . . .      /
1682   //           | \              /
1683   //           |   . . .   \    / Bot
1684   //           |        MergeMem
1685   //           |          /
1686   //         MemBarCPUOrder
1687   //         MemBarAcquire (trailing)
1688   //
1689   // This has a slightly different memory subgraph to the one seen
1690   // previously but the core of it has a similar memory flow to the
1691   // CAS normal subgraph:
1692   //
1693   //   MemBarRelease
1694   //   MemBarCPUOrder____
1695   //         |          \      . . .
1696   //         |       CompareAndSwapX  . . .
1697   //         |       C /  M |
1698   //         |      CmpI    |
1699   //         |       /      |
1700   //         |      . .    /
1701   //     Bot |   IfTrue   /
1702   //         |   /       /
1703   //    MemBarVolatile  /
1704   //         | ...     /
1705   //      StoreCM ... /
1706   //         |       / 
1707   //       . . .  SCMemProj
1708   //      Raw \    / Bot
1709   //        MergeMem
1710   //           |
1711   //   MemBarCPUOrder
1712   //   MemBarAcquire
1713   //
1714   // The G1 graph for a volatile object put is a lot more complicated.
1715   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1716   // which adds the old value to the SATB queue; the releasing store
1717   // itself; and, finally, a post-write graph which performs a card
1718   // mark.
1719   //
1720   // The pre-write graph may be omitted, but only when the put is
1721   // writing to a newly allocated (young gen) object and then only if
1722   // there is a direct memory chain to the Initialize node for the
1723   // object allocation. This will not happen for a volatile put since
1724   // any memory chain passes through the leading membar.
1725   //
1726   // The pre-write graph includes a series of 3 If tests. The outermost
1727   // If tests whether SATB is enabled (no else case). The next If tests
1728   // whether the old value is non-NULL (no else case). The third tests
1729   // whether the SATB queue index is > 0, if so updating the queue. The
1730   // else case for this third If calls out to the runtime to allocate a
1731   // new queue buffer.
1732   //
1733   // So with G1 the pre-write and releasing store subgraph looks like
1734   // this (the nested Ifs are omitted).
1735   //
1736   //  MemBarRelease (leading)____________
1737   //     C |  ||  M \   M \    M \  M \ . . .
1738   //       | LoadB   \  LoadL  LoadN   \
1739   //       | /        \                 \
1740   //       If         |\                 \
1741   //       | \        | \                 \
1742   //  IfFalse  IfTrue |  \                 \
1743   //       |     |    |   \                 |
1744   //       |     If   |   /\                |
1745   //       |     |          \               |
1746   //       |                 \              |
1747   //       |    . . .         \             |
1748   //       | /       | /       |            |
1749   //      Region  Phi[M]       |            |
1750   //       | \       |         |            |
1751   //       |  \_____ | ___     |            |
1752   //     C | C \     |   C \ M |            |
1753   //       | CastP2X | StoreN/P[mo_release] |
1754   //       |         |         |            |
1755   //     C |       M |       M |          M |
1756   //        \        | Raw     | oop       / Bot
1757   //                  . . .
1758   //          (post write subtree elided)
1759   //                    . . .
1760   //             C \         M /
1761   //         MemBarVolatile (trailing)
1762   //
1763   // Note that the three memory feeds into the post-write tree are an
1764   // AliasRawIdx slice associated with the writes in the pre-write
1765   // tree, an oop type slice from the StoreX specific to the type of
1766   // the volatile field and the AliasBotIdx slice emanating from the
1767   // leading membar.
1768   //
1769   // n.b. the LoadB in this subgraph is not the card read -- it's a
1770   // read of the SATB queue active flag.
1771   //
1772   // The CAS graph is once again a variant of the above with a
1773   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1774   // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
1776   // the AliasIdxBot feeds from the leading membar and the ScMemProj.
1777   //
1778   //  MemBarRelease (leading)____________
1779   //     C |  ||  M \   M \    M \  M \ . . .
1780   //       | LoadB   \  LoadL  LoadN   \
1781   //       | /        \                 \
1782   //       If         |\                 \
1783   //       | \        | \                 \
1784   //  IfFalse  IfTrue |  \                 \
1785   //       |     |    |   \                 \
1786   //       |     If   |    \                 |
1787   //       |     |          \                |
1788   //       |                 \               |
1789   //       |    . . .         \              |
1790   //       | /       | /       \             |
1791   //      Region  Phi[M]        \            |
1792   //       | \       |           \           |
1793   //       |  \_____ |            |          |
1794   //     C | C \     |            |          |
1795   //       | CastP2X |     CompareAndSwapX   |
1796   //       |         |   res |     |         |
1797   //     C |       M |       |  SCMemProj  M |
1798   //        \        | Raw   |     | Bot    / Bot
1799   //                  . . .
1800   //          (post write subtree elided)
1801   //                    . . .
1802   //             C \         M /
1803   //         MemBarVolatile (trailing)
1804   //
1805   // The G1 post-write subtree is also optional, this time when the
1806   // new value being written is either null or can be identified as a
1807   // newly allocated (young gen) object with no intervening control
1808   // flow. The latter cannot happen but the former may, in which case
1809   // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1811   // trailing membar as per the normal subgraph. So, the only special
1812   // case which arises is when the post-write subgraph is generated.
1813   //
1814   // The kernel of the post-write G1 subgraph is the card mark itself
1815   // which includes a card mark memory barrier (MemBarVolatile), a
1816   // card test (LoadB), and a conditional update (If feeding a
1817   // StoreCM). These nodes are surrounded by a series of nested Ifs
1818   // which try to avoid doing the card mark. The top level If skips if
1819   // the object reference does not cross regions (i.e. it tests if
1820   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1821   // need not be recorded. The next If, which skips on a NULL value,
1822   // may be absent (it is not generated if the type of value is >=
1823   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1824   // checking if card_val != young).  n.b. although this test requires
1825   // a pre-read of the card it can safely be done before the StoreLoad
1826   // barrier. However that does not bypass the need to reread the card
1827   // after the barrier.
1828   //
1829   //                (pre-write subtree elided)
1830   //        . . .                  . . .    . . .  . . .
1831   //        C |               M |    M |    M |
1832   //       Region            Phi[M] StoreN    |
1833   //          |            Raw  |  oop |  Bot |
1834   //         / \_______         |\     |\     |\
1835   //      C / C \      . . .    | \    | \    | \
1836   //       If   CastP2X . . .   |  \   |  \   |  \
1837   //       / \                  |   \  |   \  |   \
1838   //      /   \                 |    \ |    \ |    \
1839   // IfFalse IfTrue             |      |      |     \
1840   //   |       |                 \     |     /       |
1841   //   |       If                 \    | \  /   \    |
1842   //   |      / \                  \   |   /     \   |
1843   //   |     /   \                  \  |  / \     |  |
1844   //   | IfFalse IfTrue           MergeMem   \    |  |
1845   //   |  . . .    / \                 |      \   |  |
1846   //   |          /   \                |       |  |  |
1847   //   |     IfFalse IfTrue            |       |  |  |
1848   //   |      . . .    |               |       |  |  |
1849   //   |               If             /        |  |  |
1850   //   |               / \           /         |  |  |
1851   //   |              /   \         /          |  |  |
1852   //   |         IfFalse IfTrue    /           |  |  |
1853   //   |           . . .   |      /            |  |  |
1854   //   |                    \    /             |  |  |
1855   //   |                     \  /              |  |  |
1856   //   |         MemBarVolatile__(card mark  ) |  |  |
1857   //   |              ||   C |     \           |  |  |
1858   //   |             LoadB   If     |         /   |  |
1859   //   |                    / \ Raw |        /   /  /
1860   //   |                   . . .    |       /   /  /
1861   //   |                        \   |      /   /  /
1862   //   |                        StoreCM   /   /  /
1863   //   |                           |     /   /  /
1864   //   |                            . . .   /  /
1865   //   |                                   /  /
1866   //   |   . . .                          /  /
1867   //   |    |             | /            /  /
1868   //   |    |           Phi[M] /        /  /
1869   //   |    |             |   /        /  /
1870   //   |    |             |  /        /  /
1871   //   |  Region  . . .  Phi[M]      /  /
1872   //   |    |             |         /  /
1873   //    \   |             |        /  /
1874   //     \  | . . .       |       /  /
1875   //      \ |             |      /  /
1876   //      Region         Phi[M] /  /
1877   //        |               \  /  /
1878   //         \             MergeMem
1879   //          \            /
1880   //          MemBarVolatile
1881   //
1882   // As with CMS + CondCardMark the first MergeMem merges the
1883   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1884   // slice from the Store into the card mark membar. However, in this
1885   // case it may also merge an AliasRawIdx mem slice from the pre
1886   // barrier write.
1887   //
1888   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1889   // leading membar with an oop slice from the StoreN and an
1890   // AliasRawIdx slice from the post barrier writes. In this case the
1891   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1892   // which combine feeds from the If regions in the post barrier
1893   // subgraph.
1894   //
1895   // So, for G1 the same characteristic subgraph arises as for CMS +
1896   // CondCardMark. There is a normal subgraph feeding the card mark
1897   // membar and a normal subgraph feeding the trailing membar.
1898   //
1899   // The CAS graph when using G1GC also includes an optional
1900   // post-write subgraph. It is very similar to the above graph except
1901   // for a few details.
1902   // 
  // - The control flow is gated by an additional If which tests the
1904   // result from the CompareAndSwapX node
1905   // 
1906   //  - The MergeMem which feeds the card mark membar only merges the
1907   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1908   // slice from the pre-barrier. It does not merge the SCMemProj
1909   // AliasIdxBot slice. So, this subgraph does not look like the
1910   // normal CAS subgraph.
1911   //
1912   // - The MergeMem which feeds the trailing membar merges the
1913   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1914   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1915   // has two AliasIdxBot input slices. However, this subgraph does
1916   // still look like the normal CAS subgraph.
1917   //
1918   // So, the upshot is:
1919   //
  // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1924   //
1925   // In all cases a CAS graph will contain a unique normal CAS graph
1926   // feeding the trailing membar.
1927   //
1928   // In all cases where there is a card mark membar (either as part of
1929   // a volatile object put or CAS) it will be fed by a MergeMem whose
1930   // AliasIdxBot slice feed will be a leading membar.
1931   //
1932   // The predicates controlling generation of instructions for store
1933   // and barrier nodes employ a few simple helper functions (described
1934   // below) which identify the presence or absence of all these
1935   // subgraph configurations and provide a means of traversing from
1936   // one node in the subgraph to another.
1937 
1938   // is_CAS(int opcode)
1939   //
1940   // return true if opcode is one of the possible CompareAndSwapX
1941   // values otherwise false.
1942 
1943   bool is_CAS(int opcode)
1944   {
1945     return (opcode == Op_CompareAndSwapI ||
1946             opcode == Op_CompareAndSwapL ||
1947             opcode == Op_CompareAndSwapN ||
1948             opcode == Op_CompareAndSwapP);
1949   }
1950 
1951   // leading_to_trailing
1952   //
  // graph traversal helper which detects the normal case Mem feed from
1954   // a release membar (or, optionally, its cpuorder child) to a
1955   // dependent volatile membar i.e. it ensures that one or other of
1956   // the following Mem flow subgraph is present.
1957   //
1958   //   MemBarRelease {leading}
1959   //   {MemBarCPUOrder} {optional}
1960   //     Bot |  \      . . .
1961   //         |  StoreN/P[mo_release]  . . .
1962   //         |   /
1963   //        MergeMem
1964   //         |
1965   //   MemBarVolatile {not card mark}
1966   //
1967   //   MemBarRelease {leading}
1968   //   {MemBarCPUOrder} {optional}
1969   //      |       \      . . .
1970   //      |     CompareAndSwapX  . . .
1971   //               |
1972   //     . . .    SCMemProj
1973   //           \   |
1974   //      |    MergeMem
1975   //      |       /
1976   //    MemBarCPUOrder
1977   //    MemBarAcquire {trailing}
1978   //
1979   // the predicate needs to be capable of distinguishing the following
  // volatile put graph which may arise when a GC post barrier
1981   // inserts a card mark membar
1982   //
1983   //   MemBarRelease {leading}
1984   //   {MemBarCPUOrder}__
1985   //     Bot |   \       \
1986   //         |   StoreN/P \
1987   //         |    / \     |
1988   //        MergeMem \    |
1989   //         |        \   |
1990   //   MemBarVolatile  \  |
1991   //    {card mark}     \ |
1992   //                  MergeMem
1993   //                      |
1994   // {not card mark} MemBarVolatile
1995   //
1996   // if the correct configuration is present returns the trailing
1997   // membar otherwise NULL.
1998   //
1999   // the input membar is expected to be either a cpuorder membar or a
2000   // release membar. in the latter case it should not have a cpu membar
2001   // child.
2002   //
2003   // the returned value may be a card mark or trailing membar
2004   //
2005 
2006   MemBarNode *leading_to_trailing(MemBarNode *leading)
2007   {
2008     assert((leading->Opcode() == Op_MemBarRelease ||
2009             leading->Opcode() == Op_MemBarCPUOrder),
2010            "expecting a volatile or cpuroder membar!");
2011 
2012     // check the mem flow
2013     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2014 
2015     if (!mem) {
2016       return NULL;
2017     }
2018 
2019     Node *x = NULL;
2020     StoreNode * st = NULL;
2021     LoadStoreNode *cas = NULL;
2022     MergeMemNode *mm = NULL;
2023     MergeMemNode *mm2 = NULL;
2024 
2025     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2026       x = mem->fast_out(i);
2027       if (x->is_MergeMem()) {
2028         if (mm != NULL) {
2029           if (mm2 != NULL) {
2030           // should not see more than 2 merge mems
2031             return NULL;
2032           } else {
2033             mm2 = x->as_MergeMem();
2034           }
2035         } else {
2036           mm = x->as_MergeMem();
2037         }
2038       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2039         // two releasing stores/CAS nodes is one too many
2040         if (st != NULL || cas != NULL) {
2041           return NULL;
2042         }
2043         st = x->as_Store();
2044       } else if (is_CAS(x->Opcode())) {
2045         if (st != NULL || cas != NULL) {
2046           return NULL;
2047         }
2048         cas = x->as_LoadStore();
2049       }
2050     }
2051 
2052     // must have a store or a cas
2053     if (!st && !cas) {
2054       return NULL;
2055     }
2056 
2057     // must have at least one merge if we also have st
2058     if (st && !mm) {
2059       return NULL;
2060     }
2061 
2062     if (cas) {
2063       Node *y = NULL;
2064       // look for an SCMemProj
2065       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2066         x = cas->fast_out(i);
2067         if (x->is_Proj()) {
2068           y = x;
2069           break;
2070         }
2071       }
2072       if (y == NULL) {
2073         return NULL;
2074       }
2075       // the proj must feed a MergeMem
2076       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
2077         x = y->fast_out(i);
2078         if (x->is_MergeMem()) {
2079           mm = x->as_MergeMem();
2080           break;
2081         }
2082       }
2083       if (mm == NULL) {
2084         return NULL;
2085       }
2086       MemBarNode *mbar = NULL;
2087       // ensure the merge feeds a trailing membar cpuorder + acquire pair
2088       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2089         x = mm->fast_out(i);
2090         if (x->is_MemBar()) {
2091           int opcode = x->Opcode();
2092           if (opcode == Op_MemBarCPUOrder) {
2093             MemBarNode *z =  x->as_MemBar();
2094             z = child_membar(z);
2095             if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
2096               mbar = z;
2097             }
2098           }
2099           break;
2100         }
2101       }
2102       return mbar;
2103     } else {
2104       Node *y = NULL;
2105       // ensure the store feeds the first mergemem;
2106       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2107         if (st->fast_out(i) == mm) {
2108           y = st;
2109           break;
2110         }
2111       }
2112       if (y == NULL) {
2113         return NULL;
2114       }
2115       if (mm2 != NULL) {
2116         // ensure the store feeds the second mergemem;
2117         y = NULL;
2118         for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2119           if (st->fast_out(i) == mm2) {
2120             y = st;
2121           }
2122         }
2123         if (y == NULL) {
2124           return NULL;
2125         }
2126       }
2127 
2128       MemBarNode *mbar = NULL;
2129       // ensure the first mergemem feeds a volatile membar
2130       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2131         x = mm->fast_out(i);
2132         if (x->is_MemBar()) {
2133           int opcode = x->Opcode();
2134           if (opcode == Op_MemBarVolatile) {
2135             mbar = x->as_MemBar();
2136           }
2137           break;
2138         }
2139       }
2140       if (mm2 == NULL) {
2141         // this is our only option for a trailing membar
2142         return mbar;
2143       }
2144       // ensure the second mergemem feeds a volatile membar
2145       MemBarNode *mbar2 = NULL;
2146       for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
2147         x = mm2->fast_out(i);
2148         if (x->is_MemBar()) {
2149           int opcode = x->Opcode();
2150           if (opcode == Op_MemBarVolatile) {
2151             mbar2 = x->as_MemBar();
2152           }
2153           break;
2154         }
2155       }
2156       // if we have two merge mems we must have two volatile membars
2157       if (mbar == NULL || mbar2 == NULL) {
2158         return NULL;
2159       }
2160       // return the trailing membar
2161       if (is_card_mark_membar(mbar2)) {
2162         return mbar;
2163       } else {
2164         if (is_card_mark_membar(mbar)) {
2165           return mbar2;
2166         } else {
2167           return NULL;
2168         }
2169       }
2170     }
2171   }
2172 
2173   // trailing_to_leading
2174   //
2175   // graph traversal helper which detects the normal case Mem feed
2176   // from a trailing membar to a preceding release membar (optionally
2177   // its cpuorder child) i.e. it ensures that one or other of the
2178   // following Mem flow subgraphs is present.
2179   //
2180   //   MemBarRelease {leading}
2181   //   MemBarCPUOrder {optional}
2182   //    | Bot |  \      . . .
2183   //    |     |  StoreN/P[mo_release]  . . .
2184   //    |     |   /
2185   //    |    MergeMem
2186   //    |     |
2187   //   MemBarVolatile {not card mark}
2188   //
2189   //   MemBarRelease {leading}
2190   //   MemBarCPUOrder {optional}
2191   //      |       \      . . .
2192   //      |     CompareAndSwapX  . . .
2193   //               |
2194   //     . . .    SCMemProj
2195   //           \   |
2196   //      |    MergeMem
2197   //      |       |
2198   //    MemBarCPUOrder
2199   //    MemBarAcquire {trailing}
2200   //
2201   // this predicate checks for the same flow as the previous predicate
2202   // but starting from the bottom rather than the top.
2203   //
  // if the configuration is present returns the cpuorder membar for
2205   // preference or when absent the release membar otherwise NULL.
2206   //
2207   // n.b. the input membar is expected to be a MemBarVolatile or
2208   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2209   // mark membar.
2210 
2211   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2212   {
2213     // input must be a volatile membar
2214     assert((barrier->Opcode() == Op_MemBarVolatile ||
2215             barrier->Opcode() == Op_MemBarAcquire),
2216            "expecting a volatile or an acquire membar");
2217 
2218     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2219            !is_card_mark_membar(barrier),
2220            "not expecting a card mark membar");
2221     Node *x;
2222     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2223 
2224     // if we have an acquire membar then it must be fed via a CPUOrder
2225     // membar
2226 
2227     if (is_cas) {
2228       // skip to parent barrier which must be a cpuorder
2229       x = parent_membar(barrier);
2230       if (x->Opcode() != Op_MemBarCPUOrder)
2231         return NULL;
2232     } else {
2233       // start from the supplied barrier
2234       x = (Node *)barrier;
2235     }
2236 
2237     // the Mem feed to the membar should be a merge
2238     x = x ->in(TypeFunc::Memory);
2239     if (!x->is_MergeMem())
2240       return NULL;
2241 
2242     MergeMemNode *mm = x->as_MergeMem();
2243 
2244     if (is_cas) {
2245       // the merge should be fed from the CAS via an SCMemProj node
2246       x = NULL;
2247       for (uint idx = 1; idx < mm->req(); idx++) {
2248         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2249           x = mm->in(idx);
2250           break;
2251         }
2252       }
2253       if (x == NULL) {
2254         return NULL;
2255       }
2256       // check for a CAS feeding this proj
2257       x = x->in(0);
2258       int opcode = x->Opcode();
2259       if (!is_CAS(opcode)) {
2260         return NULL;
2261       }
2262       // the CAS should get its mem feed from the leading membar
2263       x = x->in(MemNode::Memory);
2264     } else {
2265       // the merge should get its Bottom mem feed from the leading membar
2266       x = mm->in(Compile::AliasIdxBot);
2267     }
2268 
2269     // ensure this is a non control projection
2270     if (!x->is_Proj() || x->is_CFG()) {
2271       return NULL;
2272     }
2273     // if it is fed by a membar that's the one we want
2274     x = x->in(0);
2275 
2276     if (!x->is_MemBar()) {
2277       return NULL;
2278     }
2279 
2280     MemBarNode *leading = x->as_MemBar();
2281     // reject invalid candidates
2282     if (!leading_membar(leading)) {
2283       return NULL;
2284     }
2285 
2286     // ok, we have a leading membar, now for the sanity clauses
2287 
2288     // the leading membar must feed Mem to a releasing store or CAS
2289     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2290     StoreNode *st = NULL;
2291     LoadStoreNode *cas = NULL;
2292     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2293       x = mem->fast_out(i);
2294       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2295         // two stores or CASes is one too many
2296         if (st != NULL || cas != NULL) {
2297           return NULL;
2298         }
2299         st = x->as_Store();
2300       } else if (is_CAS(x->Opcode())) {
2301         if (st != NULL || cas != NULL) {
2302           return NULL;
2303         }
2304         cas = x->as_LoadStore();
2305       }
2306     }
2307 
2308     // we should not have both a store and a cas
2309     if (st == NULL & cas == NULL) {
2310       return NULL;
2311     }
2312 
2313     if (st == NULL) {
2314       // nothing more to check
2315       return leading;
2316     } else {
2317       // we should not have a store if we started from an acquire
2318       if (is_cas) {
2319         return NULL;
2320       }
2321 
2322       // the store should feed the merge we used to get here
2323       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2324         if (st->fast_out(i) == mm) {
2325           return leading;
2326         }
2327       }
2328     }
2329 
2330     return NULL;
2331   }
2332 
2333   // card_mark_to_leading
2334   //
2335   // graph traversal helper which traverses from a card mark volatile
2336   // membar to a leading membar i.e. it ensures that the following Mem
2337   // flow subgraph is present.
2338   //
2339   //    MemBarRelease {leading}
2340   //   {MemBarCPUOrder} {optional}
2341   //         |   . . .
2342   //     Bot |   /
2343   //      MergeMem
2344   //         |
2345   //     MemBarVolatile (card mark)
2346   //        |     \
2347   //      . . .   StoreCM
2348   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
  //
  // n.b. the input membar is expected to be a MemBarVolatile and must
  // be a card mark membar.
2354 
2355   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2356   {
2357     // input must be a card mark volatile membar
2358     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2359 
2360     // the Mem feed to the membar should be a merge
2361     Node *x = barrier->in(TypeFunc::Memory);
2362     if (!x->is_MergeMem()) {
2363       return NULL;
2364     }
2365 
2366     MergeMemNode *mm = x->as_MergeMem();
2367 
2368     x = mm->in(Compile::AliasIdxBot);
2369 
2370     if (!x->is_MemBar()) {
2371       return NULL;
2372     }
2373 
2374     MemBarNode *leading = x->as_MemBar();
2375 
2376     if (leading_membar(leading)) {
2377       return leading;
2378     }
2379 
2380     return NULL;
2381   }
2382 
// Predicate used by the matcher: returns true when the supplied
// acquire/volatile membar is redundant because the associated access
// will be implemented with acquiring/releasing instructions, i.e. no
// dmb needs to be planted for it.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      // look through the narrow-oop decode to the underlying load
      x = x->in(1);
    }

    // redundant iff the dependency really is an acquiring load
    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    // the same load must also hang off the parent's Mem projection
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS
  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2490 
2491 bool needs_acquiring_load(const Node *n)
2492 {
2493   assert(n->is_Load(), "expecting a load");
2494   if (UseBarriersForVolatile) {
2495     // we use a normal load and a dmb
2496     return false;
2497   }
2498 
2499   LoadNode *ld = n->as_Load();
2500 
2501   if (!ld->is_acquire()) {
2502     return false;
2503   }
2504 
2505   // check if this load is feeding an acquire membar
2506   //
2507   //   LoadX[mo_acquire]
2508   //   {  |1   }
2509   //   {DecodeN}
2510   //      |Parms
2511   //   MemBarAcquire*
2512   //
2513   // where * tags node we were passed
2514   // and |k means input k
2515 
2516   Node *start = ld;
2517   Node *mbacq = NULL;
2518 
2519   // if we hit a DecodeNarrowPtr we reset the start node and restart
2520   // the search through the outputs
2521  restart:
2522 
2523   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2524     Node *x = start->fast_out(i);
2525     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2526       mbacq = x;
2527     } else if (!mbacq &&
2528                (x->is_DecodeNarrowPtr() ||
2529                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2530       start = x;
2531       goto restart;
2532     }
2533   }
2534 
2535   if (mbacq) {
2536     return true;
2537   }
2538 
2539   // now check for an unsafe volatile get
2540 
2541   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2542   //
2543   //     MemBarCPUOrder
2544   //        ||       \\
2545   //   MemBarAcquire* LoadX[mo_acquire]
2546   //        ||
2547   //   MemBarCPUOrder
2548 
2549   MemBarNode *membar;
2550 
2551   membar = parent_membar(ld);
2552 
2553   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2554     return false;
2555   }
2556 
2557   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2558 
2559   membar = child_membar(membar);
2560 
2561   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2562     return false;
2563   }
2564 
2565   membar = child_membar(membar);
2566 
2567   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2568     return false;
2569   }
2570 
2571   return true;
2572 }
2573 
2574 bool unnecessary_release(const Node *n)
2575 {
2576   assert((n->is_MemBar() &&
2577           n->Opcode() == Op_MemBarRelease),
2578          "expecting a release membar");
2579 
2580   if (UseBarriersForVolatile) {
2581     // we need to plant a dmb
2582     return false;
2583   }
2584 
2585   // if there is a dependent CPUOrder barrier then use that as the
2586   // leading
2587 
2588   MemBarNode *barrier = n->as_MemBar();
2589   // check for an intervening cpuorder membar
2590   MemBarNode *b = child_membar(barrier);
2591   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2592     // ok, so start the check from the dependent cpuorder barrier
2593     barrier = b;
2594   }
2595 
2596   // must start with a normal feed
2597   MemBarNode *trailing = leading_to_trailing(barrier);
2598 
2599   return (trailing != NULL);
2600 }
2601 
2602 bool unnecessary_volatile(const Node *n)
2603 {
2604   // assert n->is_MemBar();
2605   if (UseBarriersForVolatile) {
2606     // we need to plant a dmb
2607     return false;
2608   }
2609 
2610   MemBarNode *mbvol = n->as_MemBar();
2611 
2612   // first we check if this is part of a card mark. if so then we have
2613   // to generate a StoreLoad barrier
2614 
2615   if (is_card_mark_membar(mbvol)) {
2616       return false;
2617   }
2618 
2619   // ok, if it's not a card mark then we still need to check if it is
2620   // a trailing membar of a volatile put graph.
2621 
2622   return (trailing_to_leading(mbvol) != NULL);
2623 }
2624 
2625 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2626 
2627 bool needs_releasing_store(const Node *n)
2628 {
2629   // assert n->is_Store();
2630   if (UseBarriersForVolatile) {
2631     // we use a normal store and dmb combination
2632     return false;
2633   }
2634 
2635   StoreNode *st = n->as_Store();
2636 
2637   // the store must be marked as releasing
2638   if (!st->is_release()) {
2639     return false;
2640   }
2641 
2642   // the store must be fed by a membar
2643 
2644   Node *x = st->lookup(StoreNode::Memory);
2645 
2646   if (! x || !x->is_Proj()) {
2647     return false;
2648   }
2649 
2650   ProjNode *proj = x->as_Proj();
2651 
2652   x = proj->lookup(0);
2653 
2654   if (!x || !x->is_MemBar()) {
2655     return false;
2656   }
2657 
2658   MemBarNode *barrier = x->as_MemBar();
2659 
2660   // if the barrier is a release membar or a cpuorder mmebar fed by a
2661   // release membar then we need to check whether that forms part of a
2662   // volatile put graph.
2663 
2664   // reject invalid candidates
2665   if (!leading_membar(barrier)) {
2666     return false;
2667   }
2668 
2669   // does this lead a normal subgraph?
2670   MemBarNode *trailing = leading_to_trailing(barrier);
2671 
2672   return (trailing != NULL);
2673 }
2674 
2675 // predicate controlling translation of CAS
2676 //
2677 // returns true if CAS needs to use an acquiring load otherwise false
2678 
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmb instructions are planted instead, so a plain
    // load exclusive suffices
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // debug builds verify the CAS really is embedded in the expected
  // membar graph: Release -> CPUOrder -> CAS ... trailing Acquire
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_trailing(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2723 
2724 // predicate controlling translation of StoreCM
2725 //
2726 // returns true if a StoreStore must precede the card write otherwise
2727 // false
2728 
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then we must
  // insert the dmb ishst
  //
  // NOTE(review): this branch and the fall-through below both return
  // false, so the UseBarriersForVolatile test has no effect on the
  // result — confirm intent before simplifying

  if (UseBarriersForVolatile) {
    return false;
  }

  // we must be using CMS with conditional card marking so we have to
  // generate the StoreStore

  return false;
}
2757 
2758 
2759 #define __ _masm.
2760 
2761 // advance declarations for helper functions to convert register
2762 // indices to register objects
2763 
2764 // the ad file has to provide implementations of certain methods
2765 // expected by the generic code
2766 //
2767 // REQUIRED FUNCTIONALITY
2768 
2769 //=============================================================================
2770 
2771 // !!!!! Special hack to get all types of calls to specify the byte offset
2772 //       from the start of the call to the point where the return address
2773 //       will point.
2774 
2775 int MachCallStaticJavaNode::ret_addr_offset()
2776 {
2777   // call should be a simple bl
2778   int off = 4;
2779   return off;
2780 }
2781 
2782 int MachCallDynamicJavaNode::ret_addr_offset()
2783 {
2784   return 16; // movz, movk, movk, bl
2785 }
2786 
2787 int MachCallRuntimeNode::ret_addr_offset() {
2788   // for generated stubs the call will be
2789   //   far_call(addr)
2790   // for real runtime callouts it will be six instructions
2791   // see aarch64_enc_java_to_runtime
2792   //   adr(rscratch2, retaddr)
2793   //   lea(rscratch1, RuntimeAddress(addr)
2794   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2795   //   blrt rscratch1
2796   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2797   if (cb) {
2798     return MacroAssembler::far_branch_size();
2799   } else {
2800     return 6 * NativeInstruction::instruction_size;
2801   }
2802 }
2803 
2804 // Indicate if the safepoint node needs the polling page as an input
2805 
2806 // the shared code plants the oop data at the start of the generated
2807 // code for the safepoint node and that needs ot be at the load
2808 // instruction itself. so we cannot plant a mov of the safepoint poll
2809 // address followed by a load. setting this to true means the mov is
2810 // scheduled as a prior instruction. that's better for scheduling
2811 // anyway.
2812 
2813 bool SafePointNode::needs_polling_address_input()
2814 {
2815   return true;
2816 }
2817 
2818 //=============================================================================
2819 
2820 #ifndef PRODUCT
2821 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2822   st->print("BREAKPOINT");
2823 }
2824 #endif
2825 
2826 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2827   MacroAssembler _masm(&cbuf);
2828   __ brk(0);
2829 }
2830 
2831 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2832   return MachNode::size(ra_);
2833 }
2834 
2835 //=============================================================================
2836 
2837 #ifndef PRODUCT
2838   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2839     st->print("nop \t# %d bytes pad for loops and calls", _count);
2840   }
2841 #endif
2842 
2843   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2844     MacroAssembler _masm(&cbuf);
2845     for (int i = 0; i < _count; i++) {
2846       __ nop();
2847     }
2848   }
2849 
2850   uint MachNopNode::size(PhaseRegAlloc*) const {
2851     return _count * NativeInstruction::instruction_size;
2852   }
2853 
2854 //=============================================================================
// the constant base node writes no output register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2856 
2857 int Compile::ConstantTable::calculate_table_base_offset() const {
2858   return 0;  // absolute addressing, no offset
2859 }
2860 
2861 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2862 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2863   ShouldNotReachHere();
2864 }
2865 
2866 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2867   // Empty encoding
2868 }
2869 
2870 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2871   return 0;
2872 }
2873 
2874 #ifndef PRODUCT
2875 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
2876   st->print("-- \t// MachConstantBaseNode (empty encoding)");
2877 }
2878 #endif
2879 
2880 #ifndef PRODUCT
// Pretty-print the prolog that emit() below generates, using the same
// small-frame / large-frame split.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames: allocate in one sub, then store the fp/lr pair at
  // the top of the new frame
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frames: push the fp/lr pair with pre-index, then drop sp
    // by the remainder via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
2900 #endif
2901 
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack if the frame is large enough to require it
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // simulator support: notify method entry
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record the offset at which the frame is complete
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2937 
2938 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
2939 {
2940   return MachNode::size(ra_); // too many variables; just compute it
2941                               // the hard way
2942 }
2943 
2944 int MachPrologNode::reloc() const
2945 {
2946   return 0;
2947 }
2948 
2949 //=============================================================================
2950 
2951 #ifndef PRODUCT
// Pretty-print the epilog that emit() below generates: frame pop
// followed, for method compilations, by the polling page touch.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // mirror the small-frame / large-frame split used when the frame
  // was built
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // safepoint poll on method return
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
2975 #endif
2976 
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  // tear down the frame built by the prolog
  __ remove_frame(framesize);

  // simulator support: notify method re-entry
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // safepoint poll on method return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2992 
2993 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
2994   // Variable size. Determine dynamically.
2995   return MachNode::size(ra_);
2996 }
2997 
2998 int MachEpilogNode::reloc() const {
2999   // Return number of relocatable values contained in this instruction.
3000   return 1; // 1 for polling page.
3001 }
3002 
3003 const Pipeline * MachEpilogNode::pipeline() const {
3004   return MachNode::pipeline_class();
3005 }
3006 
3007 // This method seems to be obsolete. It is declared in machnode.hpp
3008 // and defined in all *.ad files, but it is never called. Should we
3009 // get rid of it?
3010 int MachEpilogNode::safepoint_offset() const {
3011   assert(do_polling(), "no return for this epilog node");
3012   return 4;
3013 }
3014 
3015 //=============================================================================
3016 
3017 // Figure out which register class each belongs in: rc_int, rc_float or
3018 // rc_stack.
3019 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3020 
3021 static enum RC rc_class(OptoReg::Name reg) {
3022 
3023   if (reg == OptoReg::Bad) {
3024     return rc_bad;
3025   }
3026 
3027   // we have 30 int registers * 2 halves
3028   // (rscratch1 and rscratch2 are omitted)
3029 
3030   if (reg < 60) {
3031     return rc_int;
3032   }
3033 
3034   // we have 32 float register * 2 halves
3035   if (reg < 60 + 128) {
3036     return rc_float;
3037   }
3038 
3039   // Between float regs & stack is the flags regs.
3040   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3041 
3042   return rc_stack;
3043 }
3044 
3045 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3046   Compile* C = ra_->C;
3047 
3048   // Get registers to move.
3049   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3050   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3051   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3052   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3053 
3054   enum RC src_hi_rc = rc_class(src_hi);
3055   enum RC src_lo_rc = rc_class(src_lo);
3056   enum RC dst_hi_rc = rc_class(dst_hi);
3057   enum RC dst_lo_rc = rc_class(dst_lo);
3058 
3059   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3060 
3061   if (src_hi != OptoReg::Bad) {
3062     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3063            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3064            "expected aligned-adjacent pairs");
3065   }
3066 
3067   if (src_lo == dst_lo && src_hi == dst_hi) {
3068     return 0;            // Self copy, no move.
3069   }
3070 
3071   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3072               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3073   int src_offset = ra_->reg2offset(src_lo);
3074   int dst_offset = ra_->reg2offset(dst_lo);
3075 
3076   if (bottom_type()->isa_vect() != NULL) {
3077     uint ireg = ideal_reg();
3078     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3079     if (cbuf) {
3080       MacroAssembler _masm(cbuf);
3081       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3082       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3083         // stack->stack
3084         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3085         if (ireg == Op_VecD) {
3086           __ unspill(rscratch1, true, src_offset);
3087           __ spill(rscratch1, true, dst_offset);
3088         } else {
3089           __ spill_copy128(src_offset, dst_offset);
3090         }
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3092         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3093                ireg == Op_VecD ? __ T8B : __ T16B,
3094                as_FloatRegister(Matcher::_regEncode[src_lo]));
3095       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3096         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(dst_lo));
3099       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3100         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3101                        ireg == Op_VecD ? __ D : __ Q,
3102                        ra_->reg2offset(src_lo));
3103       } else {
3104         ShouldNotReachHere();
3105       }
3106     }
3107   } else if (cbuf) {
3108     MacroAssembler _masm(cbuf);
3109     switch (src_lo_rc) {
3110     case rc_int:
3111       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3112         if (is64) {
3113             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3114                    as_Register(Matcher::_regEncode[src_lo]));
3115         } else {
3116             MacroAssembler _masm(cbuf);
3117             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3118                     as_Register(Matcher::_regEncode[src_lo]));
3119         }
3120       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3121         if (is64) {
3122             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3123                      as_Register(Matcher::_regEncode[src_lo]));
3124         } else {
3125             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3126                      as_Register(Matcher::_regEncode[src_lo]));
3127         }
3128       } else {                    // gpr --> stack spill
3129         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3130         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3131       }
3132       break;
3133     case rc_float:
3134       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3135         if (is64) {
3136             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3137                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3138         } else {
3139             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3140                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3141         }
3142       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3143           if (cbuf) {
3144             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3145                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3146         } else {
3147             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3148                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3149         }
3150       } else {                    // fpr --> stack spill
3151         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3152         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3153                  is64 ? __ D : __ S, dst_offset);
3154       }
3155       break;
3156     case rc_stack:
3157       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3158         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3159       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3160         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3161                    is64 ? __ D : __ S, src_offset);
3162       } else {                    // stack --> stack copy
3163         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3164         __ unspill(rscratch1, is64, src_offset);
3165         __ spill(rscratch1, is64, dst_offset);
3166       }
3167       break;
3168     default:
3169       assert(false, "bad rc_class for spill");
3170       ShouldNotReachHere();
3171     }
3172   }
3173 
3174   if (st) {
3175     st->print("spill ");
3176     if (src_lo_rc == rc_stack) {
3177       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3178     } else {
3179       st->print("%s -> ", Matcher::regName[src_lo]);
3180     }
3181     if (dst_lo_rc == rc_stack) {
3182       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3183     } else {
3184       st->print("%s", Matcher::regName[dst_lo]);
3185     }
3186     if (bottom_type()->isa_vect() != NULL) {
3187       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3188     } else {
3189       st->print("\t# spill size = %d", is64 ? 64:32);
3190     }
3191   }
3192 
3193   return 0;
3194 
3195 }
3196 
3197 #ifndef PRODUCT
3198 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3199   if (!ra_)
3200     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3201   else
3202     implementation(NULL, ra_, false, st);
3203 }
3204 #endif
3205 
3206 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3207   implementation(&cbuf, ra_, false, NULL);
3208 }
3209 
3210 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3211   return MachNode::size(ra_);
3212 }
3213 
3214 //=============================================================================
3215 
3216 #ifndef PRODUCT
3217 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3218   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3219   int reg = ra_->get_reg_first(this);
3220   st->print("add %s, rsp, #%d]\t# box lock",
3221             Matcher::regName[reg], offset);
3222 }
3223 #endif
3224 
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  // materialize the address of the on-stack box as sp + offset
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // offsets too large for an add immediate are not expected here
    ShouldNotReachHere();
  }
}
3237 
3238 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3239   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3240   return 4;
3241 }
3242 
3243 //=============================================================================
3244 
3245 #ifndef PRODUCT
3246 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3247 {
3248   st->print_cr("# MachUEPNode");
3249   if (UseCompressedClassPointers) {
3250     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3251     if (Universe::narrow_klass_shift() != 0) {
3252       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3253     }
3254   } else {
3255    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3256   }
3257   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3258   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3259 }
3260 #endif
3261 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // compare the receiver klass (j_rarg0) against the inline cache
  // value in rscratch2 (rscratch1 appears to serve as a temp — see
  // MacroAssembler::cmp_klass)
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // klass mismatch: jump to the inline cache miss stub
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3275 
3276 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3277 {
3278   return MachNode::size(ra_);
3279 }
3280 
3281 // REQUIRED EMIT CODE
3282 
3283 //=============================================================================
3284 
3285 // Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // The handler is a far jump to the exception blob:
  //   mov rscratch1 #exception_blob_entry_point
  //   br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // stub section exhausted; report and bail out
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  // return the offset of the handler within the stub section
  return offset;
}
3304 
3305 // Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    // stub section exhausted; report and bail out
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // set lr to the address of this instruction before jumping to the
  // deopt blob's unpack entry
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  // return the offset of the handler within the stub section
  return offset;
}
3325 
3326 // REQUIRED MATCHER CODE
3327 
3328 //=============================================================================
3329 
3330 const bool Matcher::match_rule_supported(int opcode) {
3331 
3332   switch (opcode) {
3333   default:
3334     break;
3335   }
3336 
3337   if (!has_match_rule(opcode)) {
3338     return false;
3339   }
3340 
3341   return true;  // Per default match rules are supported.
3342 }
3343 
3344 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3345 
3346   // TODO
3347   // identify extra cases that we might want to provide match rules for
3348   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3349   bool ret_value = match_rule_supported(opcode);
3350   // Add rules here.
3351 
3352   return ret_value;  // Per default match rules are supported.
3353 }
3354 
3355 const bool Matcher::has_predicated_vectors(void) {
3356   return false;
3357 }
3358 
3359 const int Matcher::float_pressure(int default_pressure_threshold) {
3360   return default_pressure_threshold;
3361 }
3362 
3363 int Matcher::regnum_to_fpu_offset(int regnum)
3364 {
3365   Unimplemented();
3366   return 0;
3367 }
3368 
3369 // Is this branch offset short enough that a short branch can be used?
3370 //
3371 // NOTE: If the platform does not provide any short branch variants, then
3372 //       this method should return false for offset 0.
3373 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3374   // The passed offset is relative to address of the branch.
3375 
3376   return (-32768 <= offset && offset < 32768);
3377 }
3378 
3379 const bool Matcher::isSimpleConstant64(jlong value) {
3380   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
3381   // Probably always true, even if a temp register is required.
3382   return true;
3383 }
3384 
3385 // true just means we have fast l2f conversion
3386 const bool Matcher::convL2FSupported(void) {
3387   return true;
3388 }
3389 
3390 // Vector width in bytes.
3391 const int Matcher::vector_width_in_bytes(BasicType bt) {
3392   int size = MIN2(16,(int)MaxVectorSize);
3393   // Minimum 2 values in vector
3394   if (size < 2*type2aelembytes(bt)) size = 0;
3395   // But never < 4
3396   if (size < 4) size = 0;
3397   return size;
3398 }
3399 
3400 // Limits on vector size (number of elements) loaded into vector.
3401 const int Matcher::max_vector_size(const BasicType bt) {
3402   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3403 }
3404 const int Matcher::min_vector_size(const BasicType bt) {
3405 //  For the moment limit the vector size to 8 bytes
3406     int size = 8 / type2aelembytes(bt);
3407     if (size < 2) size = 2;
3408     return size;
3409 }
3410 
3411 // Vector ideal reg.
3412 const int Matcher::vector_ideal_reg(int len) {
3413   switch(len) {
3414     case  8: return Op_VecD;
3415     case 16: return Op_VecX;
3416   }
3417   ShouldNotReachHere();
3418   return 0;
3419 }
3420 
3421 const int Matcher::vector_shift_count_ideal_reg(int size) {
3422   return Op_VecX;
3423 }
3424 
3425 // AES support not yet implemented
3426 const bool Matcher::pass_original_key_for_aes() {
3427   return false;
3428 }
3429 
// AArch64 supports misaligned vector store/load.
3431 const bool Matcher::misaligned_vectors_ok() {
3432   return !AlignVector; // can be changed by flag
3433 }
3434 
// false => size gets scaled to BytesPerLong, ok.
// (The count passed to array-initialization is in long-words, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
3437 
// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
3443 
// Extra cost of a float conditional move: none on this platform.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3448 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low 5 (32-bit) / 6 (64-bit)
// bits of the count, so no explicit masking is needed.
const bool Matcher::need_masked_shift_count = false;
3455 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only fold decodes into addressing when no shift is required.
  return Universe::narrow_oop_shift() == 0;
}
3469 
// Conservatively never fold narrow-klass decodes into addressing.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3475 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
3488 
// Not used on AArch64 -- asserts if ever called.
// (Previous comment said "No-op on amd64"; it appears to have been
// copied from the x86 file.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3493 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3507 
3508 // Return whether or not this register is ever used as an argument.
3509 // This function is used on startup to build the trampoline stubs in
3510 // generateOptoStub.  Registers not mentioned will be killed by the VM
3511 // call in the trampoline, and arguments in those registers not be
3512 // available to the callee.
3513 bool Matcher::can_be_java_arg(int reg)
3514 {
3515   return
3516     reg ==  R0_num || reg == R0_H_num ||
3517     reg ==  R1_num || reg == R1_H_num ||
3518     reg ==  R2_num || reg == R2_H_num ||
3519     reg ==  R3_num || reg == R3_H_num ||
3520     reg ==  R4_num || reg == R4_H_num ||
3521     reg ==  R5_num || reg == R5_H_num ||
3522     reg ==  R6_num || reg == R6_H_num ||
3523     reg ==  R7_num || reg == R7_H_num ||
3524     reg ==  V0_num || reg == V0_H_num ||
3525     reg ==  V1_num || reg == V1_H_num ||
3526     reg ==  V2_num || reg == V2_H_num ||
3527     reg ==  V3_num || reg == V3_H_num ||
3528     reg ==  V4_num || reg == V4_H_num ||
3529     reg ==  V5_num || reg == V5_H_num ||
3530     reg ==  V6_num || reg == V6_H_num ||
3531     reg ==  V7_num || reg == V7_H_num;
3532 }
3533 
// Any register that can carry a Java argument may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3538 
// Never use platform-specific assembler for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3542 
// Register for DIVI projection of divmodI.
// These four projection masks are never requested on this platform
// (no fused div/mod node is formed); each asserts if called.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3565 
// The frame pointer register mask is used to save SP across
// MethodHandle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3569 
3570 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3571   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3572     Node* u = addp->fast_out(i);
3573     if (u->is_Mem()) {
3574       int opsize = u->as_Mem()->memory_size();
3575       assert(opsize > 0, "unexpected memory operand size");
3576       if (u->as_Mem()->memory_size() != (1<<shift)) {
3577         return false;
3578       }
3579     }
3580   }
3581   return true;
3582 }
3583 
// The matcher does not require ConvI2L nodes to carry an explicit type.
const bool Matcher::convi2l_type_required = false;
3585 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base + offset forms are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con) -- clone the shift into the
  // addressing mode, but only if every memory user accesses exactly
  // (1 << con) bytes so the scale is valid for all of them.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // An (LShiftL (ConvI2L x) con) offset can also subsume the
    // conversion into the addressing mode (sign-extended index).
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    // NOTE(review): test_set() here vs. plain set() for off above --
    // presumably m may already be flagged; confirm this is intended.
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) -- subsume the conversion
  // into the addressing mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3626 
// Transform:
// (AddP base (AddP base address (LShiftL index con)) offset)
// into:
// (AddP base (AddP base offset) (LShiftL index con))
// to take full advantage of ARM's addressing modes
void Compile::reshape_address(AddPNode* addp) {
  Node *addr = addp->in(AddPNode::Address);
  if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
    const AddPNode *addp2 = addr->as_AddP();
    // Only reshape when the inner offset is a scaled index whose scale
    // matches every memory user, or an int-to-long conversion.
    if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
         addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
         size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
        addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {

      // Any use that can't embed the address computation?
      for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
        Node* u = addp->fast_out(i);
        if (!u->is_Mem() || u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
          return;
        }
      }

      Node* off = addp->in(AddPNode::Offset);
      Node* addr2 = addp2->in(AddPNode::Address);
      Node* base = addp->in(AddPNode::Base);

      Node* new_addr = NULL;
      // Check whether the graph already has the new AddP we need
      // before we create one (no GVN available here).
      for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
        Node* u = addr2->fast_out(i);
        if (u->is_AddP() &&
            u->in(AddPNode::Base) == base &&
            u->in(AddPNode::Address) == addr2 &&
            u->in(AddPNode::Offset) == off) {
          new_addr = u;
          break;
        }
      }

      if (new_addr == NULL) {
        new_addr = new AddPNode(base, addr2, off);
      }
      // Swap the plain offset and the scaled-index offset between the
      // two AddPs, disconnecting any node that becomes dead.
      Node* new_off = addp2->in(AddPNode::Offset);
      addp->set_req(AddPNode::Address, new_addr);
      if (addr->outcnt() == 0) {
        addr->disconnect_inputs(NULL, this);
      }
      addp->set_req(AddPNode::Offset, new_off);
      if (off->outcnt() == 0) {
        off->disconnect_inputs(NULL, this);
      }
    }
  }
}
3682 
3683 // helper for encoding java_to_runtime calls on sim
3684 //
3685 // this is needed to compute the extra arguments required when
3686 // planting a call to the simulator blrt instruction. the TypeFunc
3687 // can be queried to identify the counts for integral, and floating
3688 // arguments and the return type
3689 
3690 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3691 {
3692   int gps = 0;
3693   int fps = 0;
3694   const TypeTuple *domain = tf->domain();
3695   int max = domain->cnt();
3696   for (int i = TypeFunc::Parms; i < max; i++) {
3697     const Type *t = domain->field_at(i);
3698     switch(t->basic_type()) {
3699     case T_FLOAT:
3700     case T_DOUBLE:
3701       fps++;
3702     default:
3703       gps++;
3704     }
3705   }
3706   gpcnt = gps;
3707   fpcnt = fps;
3708   BasicType rt = tf->return_type();
3709   switch (rt) {
3710   case T_VOID:
3711     rtype = MacroAssembler::ret_type_void;
3712     break;
3713   default:
3714     rtype = MacroAssembler::ret_type_integral;
3715     break;
3716   case T_FLOAT:
3717     rtype = MacroAssembler::ret_type_float;
3718     break;
3719   case T_DOUBLE:
3720     rtype = MacroAssembler::ret_type_double;
3721     break;
3722   }
3723 }
3724 
// Emit a volatile (acquire/release) scalar access.  Only the bare
// base-register addressing mode is legal for these instructions, so
// index/scale/displacement are asserted absent.
// NOTE: _masm is deliberately declared OUTSIDE the braces so it stays
// in scope for any follow-up instructions the enclosing enc_class
// emits after the macro (e.g. the sign-extensions in the ldars* encs).
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3733 
// Pointer-to-member types for the MacroAssembler load/store emitters
// used by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3738 
3739   // Used for all non-volatile memory accesses.  The use of
3740   // $mem->opcode() to discover whether this pattern uses sign-extended
3741   // offsets is something of a kludge.
3742   static void loadStore(MacroAssembler masm, mem_insn insn,
3743                          Register reg, int opcode,
3744                          Register base, int index, int size, int disp)
3745   {
3746     Address::extend scale;
3747 
3748     // Hooboy, this is fugly.  We need a way to communicate to the
3749     // encoder that the index needs to be sign extended, so we have to
3750     // enumerate all the cases.
3751     switch (opcode) {
3752     case INDINDEXSCALEDI2L:
3753     case INDINDEXSCALEDI2LN:
3754     case INDINDEXI2L:
3755     case INDINDEXI2LN:
3756       scale = Address::sxtw(size);
3757       break;
3758     default:
3759       scale = Address::lsl(size);
3760     }
3761 
3762     if (index == -1) {
3763       (masm.*insn)(reg, Address(base, disp));
3764     } else {
3765       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3766       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3767     }
3768   }
3769 
3770   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3771                          FloatRegister reg, int opcode,
3772                          Register base, int index, int size, int disp)
3773   {
3774     Address::extend scale;
3775 
3776     switch (opcode) {
3777     case INDINDEXSCALEDI2L:
3778     case INDINDEXSCALEDI2LN:
3779       scale = Address::sxtw(size);
3780       break;
3781     default:
3782       scale = Address::lsl(size);
3783     }
3784 
3785      if (index == -1) {
3786       (masm.*insn)(reg, Address(base, disp));
3787     } else {
3788       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3789       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3790     }
3791   }
3792 
3793   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3794                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3795                          int opcode, Register base, int index, int size, int disp)
3796   {
3797     if (index == -1) {
3798       (masm.*insn)(reg, T, Address(base, disp));
3799     } else {
3800       assert(disp == 0, "unsupported address mode");
3801       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3802     }
3803   }
3804 
3805 %}
3806 
3807 
3808 
3809 //----------ENCODING BLOCK-----------------------------------------------------
3810 // This block specifies the encoding classes used by the compiler to
3811 // output byte streams.  Encoding classes are parameterized macros
3812 // used by Machine Instruction Nodes in order to generate the bit
3813 // encoding of the instruction.  Operands specify their base encoding
3814 // interface with the interface keyword.  There are currently
3815 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3816 // COND_INTER.  REG_INTER causes an operand to generate a function
3817 // which returns its register number when queried.  CONST_INTER causes
3818 // an operand to generate a function which returns the value of the
3819 // constant when queried.  MEMORY_INTER causes an operand to generate
3820 // four functions which return the Base Register, the Index Register,
3821 // the Scale Value, and the Offset Value of the operand when queried.
3822 // COND_INTER causes an operand to generate six functions which return
3823 // the encoding code (ie - encoding bits for the instruction)
3824 // associated with each basic boolean condition for a conditional
3825 // instruction.
3826 //
3827 // Instructions specify two basic values for encoding.  Again, a
3828 // function is available to check if the constant displacement is an
3829 // oop. They use the ins_encode keyword to specify their encoding
3830 // classes (which must be a sequence of enc_class names, and their
3831 // parameters, specified in the encoding block), and they use the
3832 // opcode keyword to specify, in order, their primary, secondary, and
3833 // tertiary opcode.  Only the opcode sections which a particular
3834 // instruction needs for encoding need to be specified.
3835 encode %{
3836   // Build emit functions for each basic byte or larger field in the
3837   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3838   // from C++ code in the enc_class source block.  Emit functions will
3839   // live in the main source block for now.  In future, we can
3840   // generalize this by adding a syntax that specifies the sizes of
3841   // fields in an order, so that the adlc can build the emit functions
3842   // automagically
3843 
  // catch all for unimplemented encodings -- emits code that reports
  // "C2 catch all" if this instruction is ever reached at runtime
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3849 
  // BEGIN Non-volatile memory access
  // All encodings below delegate to the loadStore() helpers, which
  // decode the ADL memory operand into an AArch64 addressing mode.

  // load byte, sign-extended to 32 bits (ldrsbw)
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load byte, sign-extended to 64 bits (ldrsb)
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load byte, zero-extended, into an int register (ldrb)
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load byte, zero-extended, into a long register (ldrb)
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load halfword, sign-extended to 32 bits (ldrshw)
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load halfword, sign-extended to 64 bits (ldrsh)
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load halfword, zero-extended, into an int register (ldrh)
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load halfword, zero-extended, into a long register (ldrh)
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word into an int register (ldrw)
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word, zero-extended, into a long register (ldrw)
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word, sign-extended, into a long register (ldrsw)
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit doubleword into a long register (ldr)
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit float into an FP register (ldrs)
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit double into an FP register (ldrd)
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3935 
  // load 32-bit vector (S variant) into a SIMD register
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit vector (D variant) into a SIMD register
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 128-bit vector (Q variant) into a SIMD register
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3953 
  // store byte from an int register (strb)
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte, using the zero register zr
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte preceded by a StoreStore barrier
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store halfword from an int register (strh)
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero halfword, using the zero register zr
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit word from an int register (strw)
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 32-bit word, using the zero register zr
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3996 
  // store 64-bit doubleword from a long register (str).  sp itself is
  // not encodable as a store source, so it is staged through rscratch2.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero doubleword, using the zero register zr
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4016 
  // store 32-bit float from an FP register (strs)
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit double from an FP register (strd)
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit vector (S variant) from a SIMD register
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit vector (D variant) from a SIMD register
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 128-bit vector (Q variant) from a SIMD register
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4046 
  // END Non-volatile memory access

  // volatile loads and stores
  // These use the MOV_VOLATILE macro, which only accepts the bare
  // base-register addressing mode required by stlr/ldar.

  // store-release byte (stlrb)
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword (stlrh)
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release 32-bit word (stlrw)
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
4065 
4066 
  // Load-acquire encodings.  ldar* only zero-extends, so the signed
  // variants follow the acquire load with an explicit sign extension;
  // the `__` after MOV_VOLATILE works because the macro declares
  // _masm outside its braces.

  // load-acquire byte, then sign-extend to 32 bits
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte, then sign-extend to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended, into an int register
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extended, into a long register
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, then sign-extend to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, then sign-extend to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended, into an int register
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extended, into a long register
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word into an int register
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 32-bit word, zero-extended, into a long register
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword into a long register
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // load-acquire a float: acquire into rscratch1, then fmov to FP reg
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire a double: acquire into rscratch1, then fmov to FP reg
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4141 
  // store-release 64-bit doubleword (stlr).  sp is staged through
  // rscratch2, as in aarch64_enc_str.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release a float: fmov the value to rscratch2, then stlrw.
  // The fmov uses its own _masm in a nested scope so the macro below
  // can declare another one.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release a double: fmov the value to rscratch2, then stlr
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4175 
4176   // synchronized read/update encodings
4177 
  // Load-acquire-exclusive (ldaxr) of a 64-bit value.  ldaxr only accepts
  // a plain base register, so any index/scale/displacement is first folded
  // into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // displacement plus scaled index: fold in two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4206 
  // Store-release-exclusive (stlxr) of a 64-bit value.  Like ldaxr above,
  // complex addressing is folded into rscratch2.  The stlxr status word
  // lands in rscratch1; the final cmpw turns it into condition flags
  // (EQ <=> status 0 <=> store succeeded) for the matcher to consume.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4236 
  // 64-bit compare-and-swap with release (but not acquire) semantics.
  // Only plain base-register addressing is supported (guaranteed below);
  // success/failure is reported via the flags (cf. aarch64_enc_cset_eq).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}
4243 
  // 32-bit variant of aarch64_enc_cmpxchg (release only, no acquire).
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}
4250 
4251 
4252   // The only difference between aarch64_enc_cmpxchg and
4253   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4254   // CompareAndSwap sequence to serve as a barrier on acquiring a
4255   // lock.
  // 64-bit compare-and-swap with both acquire and release semantics,
  // suitable as a lock-acquire barrier.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}
4262 
  // 32-bit variant of aarch64_enc_cmpxchg_acq (acquire and release).
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}
4269 
4270 
4271   // auxiliary used for CompareAndSwapX to set result register
4272   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4273     MacroAssembler _masm(&cbuf);
4274     Register res_reg = as_Register($res$$reg);
4275     __ cset(res_reg, Assembler::EQ);
4276   %}
4277 
4278   // prefetch encodings
4279 
  // Prefetch for store into L1 (PSTL1KEEP), handling the same addressing
  // forms as the exclusive-access encodings above.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // prfm cannot combine displacement and scaled index; lea first
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4298 
  // mov encodings
4300 
4301   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4302     MacroAssembler _masm(&cbuf);
4303     u_int32_t con = (u_int32_t)$src$$constant;
4304     Register dst_reg = as_Register($dst$$reg);
4305     if (con == 0) {
4306       __ movw(dst_reg, zr);
4307     } else {
4308       __ movw(dst_reg, con);
4309     }
4310   %}
4311 
4312   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4313     MacroAssembler _masm(&cbuf);
4314     Register dst_reg = as_Register($dst$$reg);
4315     u_int64_t con = (u_int64_t)$src$$constant;
4316     if (con == 0) {
4317       __ mov(dst_reg, zr);
4318     } else {
4319       __ mov(dst_reg, con);
4320     }
4321   %}
4322 
  // Load a pointer constant.  NULL and the special value 1 have dedicated
  // encodings (mov_p0 / mov_p1) and must not reach here.  Oop and metadata
  // constants get relocation entries; other addresses below the first page
  // are emitted as plain immediates, everything else is built
  // page-relative with adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4347 
4348   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4349     MacroAssembler _masm(&cbuf);
4350     Register dst_reg = as_Register($dst$$reg);
4351     __ mov(dst_reg, zr);
4352   %}
4353 
4354   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4355     MacroAssembler _masm(&cbuf);
4356     Register dst_reg = as_Register($dst$$reg);
4357     __ mov(dst_reg, (u_int64_t)1);
4358   %}
4359 
  // Load the safepoint polling page address page-relative with a
  // poll_type relocation.  The page is page-aligned, so adrp's low-12-bit
  // remainder must be zero (asserted).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
4368 
  // Load the byte map base address (see MacroAssembler::load_byte_map_base).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4373 
  // Load a narrow (compressed) oop constant with an oop relocation.
  // NULL has its own encoding (mov_n0) and must not reach here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4386 
4387   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4388     MacroAssembler _masm(&cbuf);
4389     Register dst_reg = as_Register($dst$$reg);
4390     __ mov(dst_reg, zr);
4391   %}
4392 
  // Load a narrow (compressed) klass constant with a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4405 
4406   // arithmetic encodings
4407 
  // 32-bit add/subtract of an add/sub-encodable immediate.  The instruct's
  // `primary` attribute selects the operation (0 => add, 1 => subtract,
  // implemented by negating the constant); a negative effective constant
  // is emitted as the opposite operation on its absolute value.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
4421 
  // 64-bit variant of aarch64_enc_addsubw_imm; the immLAddSub constant
  // fits in 32 bits (hence the int32_t), `primary` selects add/subtract.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4435 
4436   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4437     MacroAssembler _masm(&cbuf);
4438    Register dst_reg = as_Register($dst$$reg);
4439    Register src1_reg = as_Register($src1$$reg);
4440    Register src2_reg = as_Register($src2$$reg);
4441     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4442   %}
4443 
4444   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4445     MacroAssembler _masm(&cbuf);
4446    Register dst_reg = as_Register($dst$$reg);
4447    Register src1_reg = as_Register($src1$$reg);
4448    Register src2_reg = as_Register($src2$$reg);
4449     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4450   %}
4451 
4452   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4453     MacroAssembler _masm(&cbuf);
4454    Register dst_reg = as_Register($dst$$reg);
4455    Register src1_reg = as_Register($src1$$reg);
4456    Register src2_reg = as_Register($src2$$reg);
4457     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4458   %}
4459 
4460   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4461     MacroAssembler _masm(&cbuf);
4462    Register dst_reg = as_Register($dst$$reg);
4463    Register src1_reg = as_Register($src1$$reg);
4464    Register src2_reg = as_Register($src2$$reg);
4465     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4466   %}
4467 
4468   // compare instruction encodings
4469 
4470   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4471     MacroAssembler _masm(&cbuf);
4472     Register reg1 = as_Register($src1$$reg);
4473     Register reg2 = as_Register($src2$$reg);
4474     __ cmpw(reg1, reg2);
4475   %}
4476 
4477   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4478     MacroAssembler _masm(&cbuf);
4479     Register reg = as_Register($src1$$reg);
4480     int32_t val = $src2$$constant;
4481     if (val >= 0) {
4482       __ subsw(zr, reg, val);
4483     } else {
4484       __ addsw(zr, reg, -val);
4485     }
4486   %}
4487 
4488   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4489     MacroAssembler _masm(&cbuf);
4490     Register reg1 = as_Register($src1$$reg);
4491     u_int32_t val = (u_int32_t)$src2$$constant;
4492     __ movw(rscratch1, val);
4493     __ cmpw(reg1, rscratch1);
4494   %}
4495 
4496   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4497     MacroAssembler _masm(&cbuf);
4498     Register reg1 = as_Register($src1$$reg);
4499     Register reg2 = as_Register($src2$$reg);
4500     __ cmp(reg1, reg2);
4501   %}
4502 
  // Compare a long register against a 12-bit add/sub immediate by
  // subtracting (or adding the negation) into zr, keeping only the flags.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: it is its own negation
      // (val == -val), so materialise it in a scratch register instead.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4517 
4518   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4519     MacroAssembler _masm(&cbuf);
4520     Register reg1 = as_Register($src1$$reg);
4521     u_int64_t val = (u_int64_t)$src2$$constant;
4522     __ mov(rscratch1, val);
4523     __ cmp(reg1, rscratch1);
4524   %}
4525 
4526   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4527     MacroAssembler _masm(&cbuf);
4528     Register reg1 = as_Register($src1$$reg);
4529     Register reg2 = as_Register($src2$$reg);
4530     __ cmp(reg1, reg2);
4531   %}
4532 
4533   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4534     MacroAssembler _masm(&cbuf);
4535     Register reg1 = as_Register($src1$$reg);
4536     Register reg2 = as_Register($src2$$reg);
4537     __ cmpw(reg1, reg2);
4538   %}
4539 
4540   enc_class aarch64_enc_testp(iRegP src) %{
4541     MacroAssembler _masm(&cbuf);
4542     Register reg = as_Register($src$$reg);
4543     __ cmp(reg, zr);
4544   %}
4545 
4546   enc_class aarch64_enc_testn(iRegN src) %{
4547     MacroAssembler _masm(&cbuf);
4548     Register reg = as_Register($src$$reg);
4549     __ cmpw(reg, zr);
4550   %}
4551 
4552   enc_class aarch64_enc_b(label lbl) %{
4553     MacroAssembler _masm(&cbuf);
4554     Label *L = $lbl$$label;
4555     __ b(*L);
4556   %}
4557 
4558   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4559     MacroAssembler _masm(&cbuf);
4560     Label *L = $lbl$$label;
4561     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4562   %}
4563 
4564   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4565     MacroAssembler _masm(&cbuf);
4566     Label *L = $lbl$$label;
4567     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4568   %}
4569 
  // Slow-path subtype check via the supertype list.  With `primary` set
  // the result register is cleared on the hit path; on a miss execution
  // falls through to the bound `miss` label.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4587 
  // Static/special Java call.  Runtime stubs (no _method) go through a
  // trampoline with a runtime_call relocation; real Java methods get a
  // static or opt-virtual relocation plus a to-interpreter stub.  Any
  // emission failure is reported as "CodeCache is full" and we bail out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4614 
  // Virtual/interface Java call through an inline-cache call site.
  // Emission can fail when the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4624 
  // Call epilog; the VerifyStackAtCalls check is not implemented on
  // AArch64 (call_Unimplemented traps if the flag is enabled).
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4632 
  // Call from compiled Java code into the C2 runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is in the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Native entry: describe the signature for blrt and leave a
      // (zero-fp, return-address) pair on the stack so the stack walker
      // can recognise this frame.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4663 
  // Jump to the opto rethrow stub; a far_jump is used since the stub may
  // be out of direct branch range.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4668 
  // Return to the caller through the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4673 
4674   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4675     MacroAssembler _masm(&cbuf);
4676     Register target_reg = as_Register($jump_target$$reg);
4677     __ br(target_reg);
4678   %}
4679 
  // Tail jump on the exception-forwarding path: pass the return address
  // in r3 (per the callee's convention) and jump to the target.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4689 
  // Fast-path monitor enter.  On exit the condition flags encode the
  // result for the matcher: EQ => lock acquired, NE => the caller must
  // fall back to the runtime.  Handles biased locking, stack-locking via
  // CAS of the mark word, recursive stack locks, and inflated monitors.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never zero here, so this sets NE => runtime path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // stlxr status 0 => success; flags are still EQ from the cmp above.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4844 
4845   // TODO
4846   // reimplement this with custom cmpxchgptr code
4847   // which avoids some of the unnecessary branching
  // Fast-path monitor exit.  On exit the condition flags encode the
  // result: EQ => unlocked, NE => the caller must call into the runtime.
  // Handles biased locking, recursive stack locks, stack unlock via CAS
  // of the mark word, and inflated monitors.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets the flags consumed at cont; cbnz branches on the
      // register without disturbing them.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4943 
4944 %}
4945 
4946 //----------FRAME--------------------------------------------------------------
4947 // Definition of frame structure and management information.
4948 //
4949 //  S T A C K   L A Y O U T    Allocators stack-slot number
4950 //                             |   (to get allocators register number
4951 //  G  Owned by    |        |  v    add OptoReg::stack0())
4952 //  r   CALLER     |        |
4953 //  o     |        +--------+      pad to even-align allocators stack-slot
4954 //  w     V        |  pad0  |        numbers; owned by CALLER
4955 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4956 //  h     ^        |   in   |  5
4957 //        |        |  args  |  4   Holes in incoming args owned by SELF
4958 //  |     |        |        |  3
4959 //  |     |        +--------+
4960 //  V     |        | old out|      Empty on Intel, window on Sparc
4961 //        |    old |preserve|      Must be even aligned.
4962 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4963 //        |        |   in   |  3   area for Intel ret address
4964 //     Owned by    |preserve|      Empty on Sparc.
4965 //       SELF      +--------+
4966 //        |        |  pad2  |  2   pad to align old SP
4967 //        |        +--------+  1
4968 //        |        | locks  |  0
4969 //        |        +--------+----> OptoReg::stack0(), even aligned
4970 //        |        |  pad1  | 11   pad to align new SP
4971 //        |        +--------+
4972 //        |        |        | 10
4973 //        |        | spills |  9   spills
4974 //        V        |        |  8   (pad0 slot for callee)
4975 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4976 //        ^        |  out   |  7
4977 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4978 //     Owned by    +--------+
4979 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4980 //        |    new |preserve|      Must be even-aligned.
4981 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4982 //        |        |        |
4983 //
4984 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4985 //         known from SELF's arguments and the Java calling convention.
4986 //         Region 6-7 is determined per call site.
4987 // Note 2: If the calling convention leaves holes in the incoming argument
4988 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4990 //         incoming area, as the Java calling convention is completely under
4991 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4993 //         varargs C calling conventions.
4994 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4995 //         even aligned with pad0 as needed.
4996 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4997 //           (the latter is true on Intel but is it false on AArch64?)
4998 //         region 6-11 is even aligned; it may be padded out more so that
4999 //         the region from SP to FP meets the minimum stack alignment.
5000 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5001 //         alignment.  Region 11, pad1, may be dynamically extended so that
5002 //         SP meets the minimum alignment.
5003 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    // NOTE(review): the assert below excludes values outside
    // [Op_RegI, Op_RegL] even though the tables carry an Op_RegN row --
    // see the TODO above; confirm whether Op_RegN can reach here.
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo[]/hi[] map an ideal register type to the allocator register
    // numbers holding the low and high halves of the return value.
    // OptoReg::Bad in hi[] marks types that occupy a single slot.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5107 
//----------ATTRIBUTES---------------------------------------------------------
// Default operand/instruction attribute values; individual operands and
// instructions may override these in their own definitions.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5125 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate (any int constant)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant less than or equal to 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31 (e.g. max shift count for 32-bit shifts)
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NOTE(review): despite the immL_* name this matches an int constant
// (ConI / get_int), not a ConL -- presumably used where a long
// operation takes an int-typed immediate such as a shift count.
// Contrast immL_65535 below, which does match ConL.  TODO confirm
// against the instruction patterns that use it.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NOTE(review): same as immL_63 above -- immL_* name but matches
// ConI / get_int; verify this is intentional at its use sites.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-bit mask: value+1 is a power of two and the
// top two bits are clear (i.e. 0, 1, 3, 7, ... up to 62 set bits).
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-bit mask: value+1 is a power of two and the
// top two bits are clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5348 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long-typed constant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int offset for a 4-byte access (the literal is presumably the log2
// access size passed to offset_ok_for_immed -- see that function)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int offset for an 8-byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int offset for a 16-byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for a 4-byte access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for an 8-byte access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for a 16-byte access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5505 
// Integer operands 64 bit
// 64 bit immediate (any long constant)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (byte offset of JavaFrameAnchor::_last_Java_pc within JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5592 
// Pointer operands
// Pointer Immediate (any pointer constant)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// (matches only the address of the VM's safepoint polling page)
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// (a second sentinel value, distinct from immP_M1 above)
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5674 
// Float and Double operands
// Double Immediate (any double constant)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern comparison, so -0.0d is excluded)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: representable in the AArch64 packed FP immediate
// form accepted by Assembler::operand_valid_for_float_immediate
// (usable by fmov with an immediate operand)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any float constant)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern comparison, so -0.0f is excluded)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: representable in the AArch64 packed FP immediate
// form (checked after widening to double) -- see immDPacked above
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5766 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike the sibling operands this omits op_cost(0);
// presumably the ADLC default applies -- confirm this is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5842 
// Fixed-register pointer/long operands: each constrains allocation to a
// single named register, for use where the calling convention or runtime
// requires a specific register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5948 
// Fixed-register 32-bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5993 
5994 
// Narrow Pointer Register Operands
// Narrow Pointer Register (compressed oop, held in a 32-bit view)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6027 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register, D-sized view (VecD ideal type)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register, X-sized view (VecX ideal type)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register double operands: V0..V3 only, for runtime/intrinsic
// call sites that require a specific FP register.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6107 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same physical flags; the distinct operand type selects unsigned
// condition codes in the matcher)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6147 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously said link_reg -- copy/paste)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6189 
6190 //----------Memory Operands----------------------------------------------------
6191 
6192 operand indirect(iRegP reg)
6193 %{
6194   constraint(ALLOC_IN_RC(ptr_reg));
6195   match(reg);
6196   op_cost(0);
6197   format %{ "[$reg]" %}
6198   interface(MEMORY_INTER) %{
6199     base($reg);
6200     index(0xffffffff);
6201     scale(0x0);
6202     disp(0x0);
6203   %}
6204 %}
6205 
6206 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6207 %{
6208   constraint(ALLOC_IN_RC(ptr_reg));
6209   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6210   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6211   op_cost(0);
6212   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6213   interface(MEMORY_INTER) %{
6214     base($reg);
6215     index($ireg);
6216     scale($scale);
6217     disp(0x0);
6218   %}
6219 %}
6220 
// Base + 64-bit index scaled by a constant shift:
// matches (AddP reg (LShiftL lreg scale)).  Same size_fits_all_mem_uses
// predicate as the I2L variant above.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6235 
// Base + sign-extended 32-bit index, unscaled:
// matches (AddP reg (ConvI2L ireg)).
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}
6249 
// Base + 64-bit index, unscaled: matches (AddP reg lreg).
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6263 
// Base + immediate-offset addressing, [$reg, #off].  The eight variants
// below differ only in the immediate operand type (int vs long offset,
// and the 4/8/16 flavours, which presumably constrain the offset range/
// alignment for 4-, 8- and 16-byte accesses — the imm operand
// definitions live earlier in this file; confirm there).
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, offset restricted for 4-byte accesses.
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, offset restricted for 8-byte accesses.
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, offset restricted for 16-byte accesses.
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Long-typed immediate offset.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, offset restricted for 4-byte accesses.
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, offset restricted for 8-byte accesses.
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, offset restricted for 16-byte accesses.
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6375 
// Narrow-oop (compressed pointer) forms of the addressing operands
// above.  Each matches the same address shape but with a DecodeN'd
// base, and is only legal when Universe::narrow_oop_shift() == 0, so
// the compressed value needs no shifting to form an address.
// NOTE(review): the predicates check only the shift, not the heap
// base — confirm how a non-zero narrow-oop base is accounted for.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + sign-extended 32-bit index, scaled; also requires the
// scale to fit all memory uses (see indIndexScaledI2L).
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + 64-bit index, scaled.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + sign-extended 32-bit index, unscaled.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + 64-bit index, unscaled.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + long immediate offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6480 
6481 
6482 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Memory operand formed from the thread register plus the constant
// immL_pc_off displacement, addressing that pc slot.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6497 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All five variants are identical except for the slot type (P/I/F/D/L):
// base 0x1e with the slot's offset as displacement, no index/scale.
// NOTE(review): the "RSP" comments are an x86-ism carried over; on
// AArch64 0x1e must be this file's encoding of the stack pointer —
// confirm against the reg_def encodings at the top of the file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6572 
6573 // Operands for expressing Control Flow
6574 // NOTE: Label is a predefined operand which should not be redefined in
6575 //       the AD file. It is generically handled within the ADLC.
6576 
6577 //----------Conditional Branch Operands----------------------------------------
6578 // Comparison Op  - This is the operation of the comparison, and is limited to
6579 //                  the following set of codes:
6580 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6581 //
6582 // Other attributes of the comparison, such as unsignedness, are specified
6583 // by the comparison instruction that sets a condition code flags register.
6584 // That result is represented by a flags operand whose subtype is appropriate
6585 // to the unsignedness (etc.) of the comparison.
6586 //
6587 // Later, the instruction which matches both the Comparison Op (a Bool) and
6588 // the flags (produced by the Cmp) specifies the coding of the comparison op
6589 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6590 
6591 // used for signed integral comparisons and fp comparisons
6592 
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  // Values are AArch64 condition-code encodings for the signed/FP
  // conditions (eq=0b0000, ne=0b0001, lt=0b1011, ge=0b1010, ...).
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6609 
6610 // used for unsigned integral comparisons
6611 
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // As cmpOp, but the ordered conditions use the unsigned AArch64
  // condition codes (lo/hs/ls/hi).
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6628 
6629 // used for certain integral comparisons which can be
6630 // converted to cbxx or tbxx instructions
6631 
// Bool operand whose predicate restricts it to eq/ne tests, so the
// matcher can select compare-and-branch (cbz/cbnz) or test-and-branch
// (tbz/tbnz) instruction forms.
operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6652 
6653 // used for certain integral comparisons which can be
6654 // converted to cbxx or tbxx instructions
6655 
// As cmpOpEqNe, but restricted to lt/ge tests (sign-bit tests that can
// use the tbz/tbnz forms).
operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6677 
6678 // used for certain unsigned integral comparisons which can be
6679 // converted to cbxx or tbxx instructions
6680 
// Unsigned variant accepting eq/ne/lt/ge tests for the cbxx/tbxx
// instruction forms.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6704 
6705 // Special operand allowing long args to int ops to be truncated for free
6706 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  // Matches (ConvL2I reg) so a 32-bit instruction can consume the low
  // half of a long register directly, eliding the explicit l2i/movw.
  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // NOTE(review): no trailing ';' after interface(...) here, unlike the
  // other operands — ADLC evidently accepts it, but it is inconsistent.
  interface(REG_INTER)
%}
6717 
// Memory operand classes for vector loads/stores of 4, 8 and 16 bytes:
// plain indirect/indexed forms plus the immediate-offset forms whose
// immediates match the access size.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6721 
6722 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6724 // instruction definitions by not requiring the AD writer to specify
6725 // separate instructions for every form of operand when the
6726 // instruction accepts multiple operand types with the same basic
6727 // encoding and format. The classic case of this is memory operands.
6728 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6749 
6750 //----------PIPELINE-----------------------------------------------------------
6751 // Rules which define the behavior of the target architectures pipeline.
6752 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Alias the A53-style stage names used by the pipe classes below onto
// the generic S0..S5 stages declared in pipe_desc further down.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6759 
6760 // Integer ALU reg operation
6761 pipeline %{
6762 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // all AArch64 instructions are one size
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6775 
6776 // We don't use an actual pipeline model so don't care about resources
6777 // or description. we do use pipeline classes to introduce fixed
6778 // latencies
6779 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 = either issue slot; pipe classes that book INS0 (or INS1)
// alone can only occupy that specific slot in a cycle.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6790 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
// (S0..S3 are aliased to ISS/EX1/EX2/WR by the #defines above).
pipe_desc(S0, S1, S2, S3, S4, S5);
6796 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//------- Floating point operations ------------------------
// Common shape: sources read early (S1/S2), result written late (S5 for
// arithmetic/conversions, S3/S4 for conditional selects, immediates and
// constant loads).  INS01 = issue in either slot; INS0 (divides) = slot
// 0 only.  The _s/_d suffixes are the single/double precision variants.

// FP dyadic op, single precision.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Precision and int<->FP conversions — all share the same timing.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide: issues in slot 0 only (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select: also reads the flags (cr) in S1.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate: no source operands.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load (from constant pool / literal).
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
7002 
//------- Vector (NEON) operations -------------------------
// 64-bit (vecD) forms issue in either slot (INS01); the 128-bit (vecX)
// forms generally book INS0 only.  The vmla* accumulate classes list
// dst both as an S5 write and an S1 read (accumulator input).

// Vector multiply, 64-bit.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit (dst is also read).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit (dst is also read).
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op (add/sub etc.), 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (shift amount is not a pipeline input).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP dyadic op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit (slot 0 only).
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into vector lanes.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate FP register into vector lanes.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit; address consumed at issue.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7277 
// Vector store, 128-bit source to memory.
// Fix: the source operand was declared vecD, but a 128-bit store reads
// a full Q register; declared vecX for consistency with the other
// *128 classes (vload_reg_mem128, vmul128, vmla128, ...).
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7286 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): dst is written in EX2 (per the header comment), yet
  // the ALU resource is booked in EX1 — confirm this asymmetry is
  // intended rather than a copy-over from the _vshift class.
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7384 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7411 
//------- Conditional instructions ------------------------
// All three classes read the flags (cr) in EX1 and write dst in EX2.

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// Eg.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7449 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7502 
//------- Divide pipeline operations --------------------
// Both divide classes restrict issue to slot 0 (INS0), unlike the
// ALU/multiply classes above which may issue in either slot (INS01).

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7528 
//------- Load pipeline operations ------------------------
// Loads use the LDST resource and produce their result at WR.

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7562 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read); // NOTE(review): "dst" is the address register here; it is read, not written
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7596 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7625 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class (used for MachNop, see the define block below)
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7689 
7690 %}
7691 //----------INSTRUCTIONS-------------------------------------------------------
7692 //
7693 // match      -- States which machine-independent subtree may be replaced
7694 //               by this instruction.
7695 // ins_cost   -- The estimated cost of this instruction is used by instruction
7696 //               selection to identify a minimum cost tree of machine
7697 //               instructions that matches a tree of machine-independent
7698 //               instructions.
7699 // format     -- A string providing the disassembly for this instruction.
7700 //               The value of an instruction's operand may be inserted
7701 //               by referring to it with a '$' prefix.
7702 // opcode     -- Three instruction opcodes may be provided.  These are referred
7703 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7705 //               indicate the type of machine instruction, while secondary
7706 //               and tertiary are often used for prefix options or addressing
7707 //               modes.
7708 // ins_encode -- A list of encode classes with parameters. The encode class
7709 //               name must have been defined in an 'enc_class' specification
7710 //               in the encode section of the architecture description.
7711 
7712 // ============================================================================
7713 // Memory (Load/Store) Instructions
7714 
7715 // Load Instructions
7716 
// Load Byte (8 bit signed)
// The !needs_acquiring_load(n) predicate rejects loads that require
// acquire semantics; those are matched by the _volatile variants below.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// Matches the fused ConvI2L(LoadB); the predicate tests the inner load.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7772 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
// Matches the fused ConvI2L(LoadS); the predicate tests the inner load.
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7828 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// ldrsw sign-extends the loaded 32-bit value to 64 bits.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with an all-ones 32-bit mask makes ldrw's implicit
// zero-extension sufficient, so a plain 32-bit load is emitted.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
// NOTE(review): format comment says "# int" though this is a 64-bit load;
// cosmetic only (format strings affect debug disassembly output).
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7884 
// Load Range
// No acquiring-load predicate here — presumably array-length loads never
// need acquire semantics; confirm against the matcher if this changes.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7953 
// Load Float
// FP loads use the generic pipe_class_memory rather than iload_reg_mem.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7981 
7982 
// Load Int Constant
// Materializes an arbitrary 32-bit immediate into a register.
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
8008 
// Load Pointer Constant
// General pointer constants may need up to a 4-instruction sequence,
// hence the higher cost relative to loadConP0/loadConP1.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant One
// NOTE(review): the format string still says "NULL ptr" (copied from
// loadConP0) though this materializes constant one — cosmetic only.

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Poll Page Constant

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}
8066 
// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
// Like loadConP, a general narrow constant may take a longer sequence.

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8122 
// Load Packed Float Constant
// "Packed" means the value is encodable as an fmov immediate, so no
// constant-table load is needed.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}

// Load Double Constant
// NOTE(review): format string says "float=$con" (copied from loadConF)
// though this loads a double — cosmetic only.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8183 
8184 // Store Instructions
8185 
// Store CMS card-mark Immediate
// Used only when the StoreStore barrier is provably unnecessary
// (see the predicate); otherwise the _ordered variant below matches.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
8214 
// Store Byte
// The !needs_releasing_store(n) predicate rejects stores that require
// release semantics; those are matched by the _volatile variants below.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8228 
8229 
// Store Immediate Byte Zero
// The encoding (aarch64_enc_strb0, shared with storeimmCM0 above)
// stores the zero register, so the format now prints "strb zr" as the
// CM variant does; it formerly printed the misspelt "rscractch2".
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8242 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Immediate Char/Short Zero (stores the zero register)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
8269 
// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Immediate Integer Zero (stores the zero register)
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8297 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Immediate Long Zero (stores the zero register)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8325 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer (stores the zero register)
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8353 
// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer
// When both narrow-oop and narrow-klass bases are NULL, rheapbase holds
// zero, so storing it is equivalent to storing a compressed null.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8382 
// Store Float
// FP stores use the generic pipe_class_memory rather than istore_reg_mem.
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8413 
// Store Compressed Klass Pointer
// NOTE(review): predicate precedes match here, the reverse of the other
// store instructs; ADLC accepts either order.
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8427 
8428 // TODO
8429 // implement storeImmD0 and storeDImmPacked
8430 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8444 
//  ---------------- volatile loads and stores ----------------
// These match the loads/stores rejected by the needs_acquiring_load /
// needs_releasing_store predicates above, using acquire (ldar*) and
// release (stlr*) forms with an indirect (register) address.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8498 
// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8536 
// Load Short/Char (16 bit signed) into long
// Format fixed to match the encoding: aarch64_enc_ldarsh emits the
// sign-extending ldarsh, not the zero-extending ldarh (compare
// loadS_volatile which pairs ldarshw with aarch64_enc_ldarshw).
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8549 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends, which satisfies the AndL 32-bit mask directly.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8614 
// Load Float
// FP acquiring loads go through scratch-register encodings (fldars/fldard).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8640 
// Store Byte
// Volatile stores use the stlr* store-release forms, which order the
// store after all prior memory accesses.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8680 
// Store Long (64 bit signed)
// 64-bit stlr gives the store-release ordering required for a
// volatile long write.
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly annotation: this is a 64-bit (long) store, the
  // previous "# int" comment was copied from the 32-bit rule.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8693 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
// Narrow oops are 32 bits, so the 32-bit stlrw form is used.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// NOTE(review): no FP store-release instruction exists; the
// aarch64_enc_fstlrs encoding (defined elsewhere) presumably moves
// the FP bits to an integer register and issues stlrw — confirm.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8748 
8749 //  ---------------- end of volatile loads and stores ----------------
8750 
8751 // ============================================================================
8752 // BSWAP Instructions
8753 
// Byte-reverse a 32-bit int (Integer.reverseBytes intrinsic).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a 64-bit long (Long.reverseBytes intrinsic).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse an unsigned 16-bit value (Character.reverseBytes).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a signed 16-bit value (Short.reverseBytes): rev16w
// swaps the two low bytes, then sbfmw sign-extends bits 0..15 so the
// result is a proper (sign-extended) short in a 32-bit register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8807 
8808 // ============================================================================
8809 // Zero Count Instructions
8810 
// Count leading zeros of a 32-bit int: direct clzw.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit long: direct clz.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros: AArch64 has no ctz instruction, so reverse
// the bits (rbitw) and count leading zeros instead — hence 2x cost.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// 64-bit trailing-zero count via rbit + clz.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8862 
8863 //---------- Population Count Instructions -------------------------------------
8864 //
8865 
// Population count of a 32-bit int. AArch64 has no scalar popcount,
// so the value is moved to a SIMD register, CNT counts bits per byte,
// ADDV sums the byte counts, and the result is moved back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw of a register to itself clears the upper 32 bits so the
    // subsequent 64-bit vector move only counts the int's bits.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI fused with its memory load: ldrs loads 32 bits straight
// into the SIMD register, skipping the integer-side zeroing move.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountL fused with its memory load (ldrd loads 64 bits).
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8952 
8953 // ============================================================================
8954 // MemBar Instruction
8955 
// LoadFence: orders prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire elided when the preceding access already provides
// acquire semantics (e.g. an ldar emitted by a volatile load rule);
// unnecessary_acquire(n) makes that determination.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier when it cannot be elided.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Lock-acquire barriers emit no code; only a block comment is left
// in the instruction stream for debugging.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease elided when the following access already provides
// release semantics (e.g. an stlr emitted by a volatile store rule).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier when it cannot be elided.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-release barrier emits no code, like its acquire counterpart.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile elided when surrounding ldar/stlr already provide
// the required ordering; unnecessary_volatile(n) decides.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full StoreLoad barrier — the most expensive ordering, reflected in
// the inflated cost.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9100 
9101 // ============================================================================
9102 // Cast/Convert Instructions
9103 
// Reinterpret a long as a pointer; a register move only when source
// and destination registers differ, otherwise no code is emitted.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; same conditional-move pattern.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9146 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// With a zero narrow-oop shift the compressed bits equal the low 32
// address bits, so a 32-bit register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: "$" was missing before dst (the literal text
  // "dst" was printed), and the mnemonic now matches the movw actually
  // emitted by the encoding below.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9162 
9163 
// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null variant: skips the null check inside the macro.
// NOTE(review): cr is declared but has no KILL effect here, unlike
// encodeHeapOop above — verify encode_heap_oop_not_null really
// leaves the flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null.
// NOTE(review): cr is declared but unused (no KILL effect) — confirm
// against the macro-assembler implementation.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9217 
9218 // n.b. AArch64 implementations of encode_klass_not_null and
9219 // decode_klass_not_null do not modify the flags register so, unlike
9220 // Intel, we don't kill CR as a side effect here
9221 
// Compress a klass pointer (per the note above, no flags are
// clobbered, so there is no KILL cr effect).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the macro assembler provides an
// in-place single-register form used when dst == src.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9255 
// CheckCastPP is a compile-time type assertion; it emits no code
// (size 0, empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP likewise emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII likewise emits no code and is free for the allocator.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9286 
9287 // ============================================================================
9288 // Atomic operation instructions
9289 //
9290 // Intel and SPARC both implement Ideal Node LoadPLocked and
9291 // Store{PIL}Conditional instructions using a normal load for the
9292 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9293 //
9294 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9295 // pair to lock object allocations from Eden space when not using
9296 // TLABs.
9297 //
9298 // There does not appear to be a Load{IL}Locked Ideal Node and the
9299 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9300 // and to use StoreIConditional only for 32-bit and StoreLConditional
9301 // only for 64-bit.
9302 //
9303 // We implement LoadPLocked and StorePLocked instructions using,
9304 // respectively the AArch64 hw load-exclusive and store-conditional
9305 // instructions. Whereas we must implement each of
9306 // Store{IL}Conditional using a CAS which employs a pair of
9307 // instructions comprising a load-exclusive followed by a
9308 // store-conditional.
9309 
9310 
9311 // Locked-load (linked load) of the current heap-top
9312 // used when updating the eden heap top
9313 // implemented using ldaxr on AArch64
9314 
// Linked load of the heap top using ldaxr (load-acquire exclusive);
// paired with the stlxr in storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9327 
9328 // Conditional-store of the updated heap-top.
9329 // Used during allocation of the shared heap.
9330 // Sets flag (EQ) on success.
9331 // implemented using stlxr on AArch64.
9332 
// Store-conditional (stlxr) paired with loadPLocked; sets EQ on
// success via the cmpw against the status register.
// NOTE(review): oldval is matched but not passed to the encoding —
// the exclusive monitor set up by loadPLocked carries that state.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9352 
9353 
9354 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9355 // when attempting to rebias a lock towards the current thread.  We
9356 // must use the acquire form of cmpxchg in order to guarantee acquire
9357 // semantics in this case.
// Store-conditional of a long implemented as a full CAS with acquire
// semantics (see comment above); result is reported in the flags.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// 32-bit counterpart of storeLConditional (acquiring CAS, flags
// result), kept symmetric as explained in the comment above.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9392 
9393 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9394 // can't match them
9395 
9396 // standard CompareAndSwapX when we are using barriers
9397 // these have higher priority than the rules selected by a predicate
9398 
// CompareAndSwapI with full barriers: relaxed CAS encoding, then
// cset materializes the success flag (1/0) into res.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// 64-bit CAS, same structure with the 64-bit cmpxchg encoding.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS (64-bit).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS (32-bit compressed pointer).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9470 
9471 // alternative CompareAndSwapX when we are eliding barriers
9472 
// Acquiring variants: selected (via needs_acquiring_load_exclusive)
// when surrounding barriers are being elided, so the CAS itself must
// use a load-acquire exclusive. Lower cost makes them win over the
// barrier-form rules above when the predicate holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9548 
9549 
// GetAndSet family: atomic exchange returning the previous value;
// w-forms for 32-bit int/narrow-oop, x-forms for long/pointer.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9585 
9586 
// GetAndAdd family. The *_no_res variants match when the old value is
// unused (result_not_used()); they pass noreg and cost slightly less
// so the matcher prefers them. The *i variants take an add/sub
// immediate ($incr$$constant) instead of a register.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9670 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // cmp sets the flags from the 64-bit comparison; csetw makes dst
    // 0 (equal) or 1 (not equal); cnegw then negates dst to -1 when
    // src1 < src2, yielding the -1/0/+1 three-way result.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9693 
// Manifest a CmpL-against-immediate result in an integer register;
// same -1/0/+1 contract as cmpL3_reg_reg above.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // A negative constant is compared by adding its negation so the
    // magnitude fits an add/sub immediate encoding (assumes immLAddSub
    // keeps -con representable -- the operand is an add/sub immediate).
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9718 
9719 // ============================================================================
9720 // Conditional Move Instructions
9721 
9722 // n.b. we have identical rules for both a signed compare op (cmpOp)
9723 // and an unsigned compare op (cmpOpU). it would be nice if we could
9724 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9729 // which throws a ShouldNotHappen. So, we have to provide two flavours
9730 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9731 
// Conditional move, int: per CSEL semantics dst = (cmp holds) ? src2 : src1.
// Signed compare-op flavour.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned compare-op flavour of the rule above (see comment block
// preceding these rules for why both flavours are required).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9763 
9764 // special cases where one arg is zero
9765 
9766 // n.b. this is selected in preference to the rule above because it
9767 // avoids loading constant 0 into a source register
9768 
9769 // TODO
9770 // we ought only to be able to cull one of these variants as the ideal
9771 // transforms ought always to order the zero consistently (to left/right?)
9772 
// Conditional move, int, with a zero argument: zr is used directly as a
// csel source so no register need hold the constant 0.
// dst = (cmp holds) ? src : 0 (zero on the left of the ideal Binary).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right of the ideal Binary: dst = (cmp holds) ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9836 
9837 // special case for creating a boolean 0 or 1
9838 
9839 // n.b. this is selected in preference to the rule above because it
9840 // avoids loading constants 0 and 1 into a source register
9841 
// Boolean materialization: dst = (cmp holds) ? 1 : 0 with no source
// registers -- csincw zr,zr gives 0 when the condition holds and 1
// (zr + 1) otherwise, so the ideal operands are (Binary one zero).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9879 
// Conditional move, long: 64-bit csel, dst = (cmp holds) ? src2 : src1.
// Signed compare-op flavour.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned compare-op flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Zero on the right: dst = (cmp holds) ? 0 : src, using zr directly.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: dst = (cmp holds) ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9977 
// Conditional move, pointer: 64-bit csel, dst = (cmp holds) ? src2 : src1.
// Signed compare-op flavour.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned compare-op flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null on the right: dst = (cmp holds) ? null : src, using zr directly.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left: dst = (cmp holds) ? src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10075 
// Conditional move, compressed pointer (narrow oop): 32-bit cselw,
// dst = (cmp holds) ? src2 : src1.  Signed compare-op flavour.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned compare-op flavour of cmovN_reg_reg.
// (The format comment previously said "signed"; corrected to match the
// cmpOpU/rFlagsRegU operands, consistent with every other U-rule here.)
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10107 
// special cases where one arg is zero

// Narrow-oop null on the right: dst = (cmp holds) ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Narrow-oop null on the left: dst = (cmp holds) ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10173 
// Conditional move, float: fcsels picks src2 when the condition holds,
// else src1.  Signed compare-op flavour.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned compare-op flavour of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10209 
// Conditional move, double: fcseld picks src2 when the condition holds,
// else src1.  Signed compare-op flavour.
// (Format comments previously said "cmove float" in both double rules;
// corrected to "cmove double" to match the fcseld encoding.)
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}

// Unsigned compare-op flavour of cmovD_reg.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10245 
10246 // ============================================================================
10247 // Arithmetic Instructions
10248 //
10249 
10250 // Integer Addition
10251 
10252 // TODO
10253 // these currently employ operations which do not set CR and hence are
10254 // not flagged as killing CR but we would like to isolate the cases
10255 // where we want to set flags from those where we don't. need to work
10256 // out how to do that.
10257 
// Integer addition, register + register: dst = src1 + src2 (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer addition, register + immediate (immIAddSub range).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As addI_reg_imm, matching through a ConvL2I on the register operand
// (the 32-bit addw makes the explicit narrowing redundant).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10300 
// Pointer Addition
// dst = src1 + src2, 64-bit add.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus sign-extended int offset: folds the ConvI2L into the
// add's sxtw extend, avoiding a separate extension instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus shifted long index: folds the left shift into the
// address-generation lsl of a single lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus shifted, sign-extended int index: folds both the
// ConvI2L and the shift into one lea with an sxtw-scaled index.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10361 
// Sign-extend-then-shift collapsed into one sbfiz: dst = ((long)src) << scale.
// The width argument MIN(32, (-scale) & 63) caps the inserted bitfield at
// the 32 significant bits of the int source.
// NOTE(review): cr is declared but the encoding does not touch flags --
// presumably required by the matcher; confirm against ADLC conventions.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10376 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
// dst = src1 + src2, 64-bit add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition (no constant pool entries required).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10425 
// Integer Subtraction
// dst = src1 - src2 (32-bit subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// dst = src1 - src2, 64-bit sub.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10473 
// Long Immediate Subtraction (no constant pool entries required).
// (Fixed the format string, which read "sub$dst" with no separator and
// so rendered e.g. "subx10, ..." in the printed assembly.)
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10488 
// Integer Negation (special case for sub)
// Matches SubI with a zero left operand: dst = -src (32-bit negw).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10504 
// Long Negation
// Matches SubL with a zero left operand: dst = -src (64-bit neg).
// Fix: the source operand was declared iRegIorL2I (an int/L2I operand)
// in this 64-bit rule; every other SubL rule in this file takes iRegL,
// so use iRegL for the long source here as well.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10520 
// Integer Multiply
// dst = src1 * src2 (32-bit mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening 32x32->64 multiply: a MulL whose inputs are both ConvI2L
// collapses to a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply
// dst = src1 * src2, 64-bit mul.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10569 
// High 64 bits of a signed 128-bit product: dst = (src1 * src2) >> 64,
// via a single smulh.
// NOTE(review): cr is listed as an operand but the encoding does not
// touch flags -- presumably a matcher requirement; confirm.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10585 
// Combined Integer Multiply & Add/Sub
// (Format mnemonics corrected from "madd"/"msub" to the 32-bit
// "maddw"/"msubw" actually emitted by the encodings.)

// dst = src3 + src1 * src2, fused into one maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - src1 * src2, fused into one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10619 
10620 // Combined Long Multiply & Add/Sub
10621 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2 (madd).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10637 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 (msub).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10653 
10654 // Integer Divide
10655 
// 32-bit signed divide.  The instruction sequence is emitted by the
// encoding class aarch64_enc_divw (defined elsewhere in this file).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10665 
// Sign-bit extraction: (src1 >> 31) >>> 31.  The arithmetic shift
// replicates the sign bit, so the combined effect is simply the top bit,
// which a single logical right shift by 31 produces.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10675 
// Rounding correction for signed divide-by-2^k:
// src + ((src >> 31) >>> 31)  ==  src + (src >>> 31), i.e. add 1 when
// src is negative.  Folded into one addw with an LSR-shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10689 
10690 // Long Divide
10691 
// 64-bit signed divide.  The instruction sequence is emitted by the
// encoding class aarch64_enc_div (defined elsewhere in this file).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10701 
// 64-bit sign-bit extraction: (src1 >> 63) >>> 63 reduces to a single
// logical right shift by 63 (see signExtract above for the reasoning).
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10711 
// 64-bit rounding correction for signed divide-by-2^k:
// src + ((src >> 63) >>> 63)  ==  src + (src >>> 63), i.e. add 1 when
// src is negative.  Folded into one add with an LSR-shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Format fixed to show the LSR shift kind, matching the emitted
  // instruction and the 32-bit twin rule div2Round.  Format text is
  // debug-only disassembly output; no code generation change.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10725 
10726 // Integer Remainder
10727 
// 32-bit signed remainder: dst = src1 % src2, computed as
//   sdivw rscratch1, src1, src2
//   msubw dst, rscratch1, src2, src1
// (sequence emitted by encoding class aarch64_enc_modw, defined
// elsewhere in this file).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Format fixed: the second line read "msubw($dst, ..." with a stray,
  // unbalanced parenthesis.  Format text is debug-only output.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10738 
10739 // Long Remainder
10740 
// 64-bit signed remainder: dst = src1 % src2, computed as
//   sdiv rscratch1, src1, src2
//   msub dst, rscratch1, src2, src1
// (sequence emitted by encoding class aarch64_enc_mod, defined
// elsewhere in this file).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Format fixed: the second line read "msub($dst, ..." with a stray,
  // unbalanced parenthesis, and the first line lacked the "\t" after
  // "\n" (unlike modI).  Format text is debug-only output.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10751 
10752 // Integer Shifts
10753 
10754 // Shift Left Register
// Variable 32-bit shift left: dst = src1 << src2.  LSLV takes the shift
// amount modulo 32, matching Java int shift semantics.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10769 
10770 // Shift Left Immediate
// Immediate 32-bit shift left.  The constant is masked to 5 bits to
// match Java int shift semantics (count taken mod 32).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10785 
10786 // Shift Right Logical Register
// Variable 32-bit logical shift right: dst = src1 >>> src2 (LSRV,
// count taken mod 32 by the hardware).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10801 
10802 // Shift Right Logical Immediate
// Immediate 32-bit logical shift right; constant masked to 5 bits
// (Java int shift semantics).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10817 
10818 // Shift Right Arithmetic Register
// Variable 32-bit arithmetic shift right: dst = src1 >> src2 (ASRV,
// count taken mod 32 by the hardware).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10833 
10834 // Shift Right Arithmetic Immediate
// Immediate 32-bit arithmetic shift right; constant masked to 5 bits
// (Java int shift semantics).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10849 
10850 // Combined Int Mask and Right Shift (using UBFM)
10851 // TODO
10852 
10853 // Long Shifts
10854 
10855 // Shift Left Register
// Variable 64-bit shift left: dst = src1 << src2 (LSLV, count taken
// mod 64 by the hardware, matching Java long shift semantics).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10870 
10871 // Shift Left Immediate
// Immediate 64-bit shift left; constant masked to 6 bits
// (Java long shift semantics).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10886 
10887 // Shift Right Logical Register
// Variable 64-bit logical shift right: dst = src1 >>> src2 (LSRV,
// count taken mod 64 by the hardware).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10902 
10903 // Shift Right Logical Immediate
// Immediate 64-bit logical shift right; constant masked to 6 bits
// (Java long shift semantics).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10918 
10919 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: logical right shift of a
// pointer reinterpreted as a long (CastP2X).  Same lsr as urShiftL_reg_imm;
// the explicit rule lets the matcher handle the CastP2X directly.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10934 
10935 // Shift Right Arithmetic Register
// Variable 64-bit arithmetic shift right: dst = src1 >> src2 (ASRV,
// count taken mod 64 by the hardware).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10950 
10951 // Shift Right Arithmetic Immediate
// Immediate 64-bit arithmetic shift right; constant masked to 6 bits
// (Java long shift semantics).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10966 
// BEGIN This section of the file is automatically generated. Do not edit --------------
// NOTE(review): the rules below are generator output; any permanent comment
// or code change belongs in the generator source, not in this file.

// Bitwise NOT: XorI/XorL with the all-ones immediate is matched to
// eon/eonw with the zero register (dst = ~src1).
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// AND with a complemented operand: src1 & ~src2 => bicw/bic.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// OR with a complemented operand: src1 | ~src2 => ornw/orn.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Complemented XOR: -1 ^ (src2 ^ src1) == ~(src1 ^ src2) => eonw/eon.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// AND with a complemented, shifted operand:
// src1 & ~(src2 shift src3) => bicw/bic with shifted register.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Complemented XOR with a shifted operand:
// -1 ^ ((src2 shift src3) ^ src1) => eonw/eon with shifted register.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// OR with a complemented, shifted operand:
// src1 | ~(src2 shift src3) => ornw/orn with shifted register.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// AND with a shifted operand: src1 & (src2 shift src3) => andw/andr
// with shifted register.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// XOR with a shifted operand: src1 ^ (src2 shift src3) => eorw/eor
// with shifted register.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

11637 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11638                          iRegL src1, iRegL src2,
11639                          immI src3, rFlagsReg cr) %{
11640   match(Set dst (XorL src1 (LShiftL src2 src3)));
11641 
11642   ins_cost(1.9 * INSN_COST);
11643   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11644 
11645   ins_encode %{
11646     __ eor(as_Register($dst$$reg),
11647               as_Register($src1$$reg),
11648               as_Register($src2$$reg),
11649               Assembler::LSL,
11650               $src3$$constant & 0x3f);
11651   %}
11652 
11653   ins_pipe(ialu_reg_reg_shift);
11654 %}
11655 
// OR with shifted second operand, int (w-form) and long variants.
// Each rule folds a constant shift of src2 into the shifted-register
// form of ORR/ORRW. Shift counts are masked to 0x1f (int) or 0x3f
// (long) to match Java shift semantics.

// dst = src1 | (src2 >>> (src3 & 31))
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> (src3 & 63))
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> (src3 & 31))  (arithmetic shift)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> (src3 & 63))  (arithmetic shift)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << (src3 & 31))
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << (src3 & 63))
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11769 
// ADD with shifted second operand, int (w-form) and long variants.
// Each rule folds a constant shift of src2 into the shifted-register
// form of ADD/ADDW. Shift counts are masked to 0x1f (int) or 0x3f
// (long) to match Java shift semantics.

// dst = src1 + (src2 >>> (src3 & 31))
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> (src3 & 63))
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> (src3 & 31))  (arithmetic shift)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> (src3 & 63))  (arithmetic shift)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << (src3 & 31))
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << (src3 & 63))
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11883 
// SUB with shifted second operand, int (w-form) and long variants.
// Each rule folds a constant shift of src2 into the shifted-register
// form of SUB/SUBW. Shift counts are masked to 0x1f (int) or 0x3f
// (long) to match Java shift semantics.

// dst = src1 - (src2 >>> (src3 & 31))
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> (src3 & 63))
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> (src3 & 31))  (arithmetic shift)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> (src3 & 63))  (arithmetic shift)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << (src3 & 31))
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << (src3 & 63))
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11997 
11998 
11999 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (x << lshift) >> rshift (arithmetic) collapses to a single SBFM
// (signed bitfield move) with immr = (rshift - lshift) & 63 and
// imms = 63 - lshift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12022 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (x << lshift) >> rshift collapses to a
// single SBFMW with immr = (rshift - lshift) & 31, imms = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12045 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << lshift) >>> rshift collapses to
// a single UBFM with immr = (rshift - lshift) & 63, imms = 63 - lshift.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12068 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: (x << lshift) >>> rshift collapses to a
// single UBFMW with immr = (rshift - lshift) & 31, imms = 31 - lshift.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// dst = (src >>> rshift) & mask, where immI_bitmask guarantees mask is
// a contiguous run of low-order ones (2^width - 1), so the shift+and
// pair collapses to a single UBFXW (unsigned bitfield extract).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // Show the shift amount too: the previous format string omitted
  // $rshift, leaving the opto-assembly listing incomplete.
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Width of the extracted field; mask+1 is a power of two.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// Long variant: dst = (src >>> rshift) & mask, with immL_bitmask
// guaranteeing a contiguous low-order mask; emitted as a single UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  // Show the shift amount too: the previous format string omitted
  // $rshift, leaving the opto-assembly listing incomplete.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Width of the extracted field; mask+1 is a power of two.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12123 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit UBFX zero-extends into the upper word, so the ConvI2L is
// free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  // Show the shift amount too: the previous format string omitted
  // $rshift, leaving the opto-assembly listing incomplete.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Width of the extracted field; mask+1 is a power of two.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12141 
12142 // Rotations
12143 
// dst = (src1 << lshift) | (src2 >>> rshift) where lshift + rshift is a
// multiple of 64 (checked by the predicate), i.e. the two shifted
// halves butt together exactly — a single EXTR extracts the combined
// field.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12158 
// 32-bit variant of extrOrL: lshift + rshift must be a multiple of 32
// (predicate), so the OR of the two shifted values is a single EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12173 
// Same as extrOrL but matching AddL: when the shifted halves cannot
// overlap (lshift + rshift == 0 mod 64), ADD and OR are equivalent, so
// a single EXTR suffices.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12188 
// 32-bit variant of extrAddL: non-overlapping shifted halves make AddI
// equivalent to OrI, so a single EXTRW suffices.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12203 
12204 
// rol expander
// AArch64 has no rotate-left instruction; rol(x, s) is implemented as
// rorv(x, -s), since -s mod 64 == 64 - s. rscratch1 holds the negated
// (variable) shift count.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // Negate the shift count: subw computes zr - shift.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant: rolw(x, s) == rorvw(x, -s mod 32).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // Negate the shift count: subw computes zr - shift.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12236 
// Rotate-left match rules. The ideal graph expresses rol as
// (x << s) | (x >>> (C - s)) with C == 64 (or 32), or C == 0 since the
// shift counts are taken mod the type width; both forms expand to the
// rol expanders above.

// Long rotate-left, variable shift, C == 64.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Long rotate-left, variable shift, C == 0 (0 - s == -s mod 64).
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Int rotate-left, variable shift, C == 32.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// Int rotate-left, variable shift, C == 0 (0 - s == -s mod 32).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12272 
// ror expander
// Rotate-right maps directly onto RORV (variable rotate right), so no
// negation is needed and the cost is a single instruction.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant: maps onto RORVW.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12302 
// Rotate-right match rules, mirroring the rol rules above: the ideal
// graph form is (x >>> s) | (x << (C - s)) with C == 64/32 or C == 0;
// all expand to the single-instruction ror expanders.

// Long rotate-right, variable shift, C == 64.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Long rotate-right, variable shift, C == 0.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Int rotate-right, variable shift, C == 32.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// Int rotate-right, variable shift, C == 0.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12338 
// Add/subtract (extended)
// These rules fold sign/zero extension of the second operand into the
// extended-register form of ADD/SUB.

// dst = src1 + sign_extend_32_to_64(src2): the ConvI2L becomes an sxtw
// extend on the ADD operand.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12353 
// dst = src1 - sign_extend_32_to_64(src2): the ConvI2L becomes an sxtw
// extend on the SUB operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12366 
12367 
// The (x << k) >> k shift-pair idiom sign-extends (RShift) or
// zero-extends (URShift) the low 32-k (or 64-k) bits of src2; each rule
// below folds it into the extended-register form of ADD/ADDW.

// dst = src1 + sign_extend_16(src2): shift pair of 16 on an int.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign_extend_8(src2): shift pair of 24 on an int.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + zero_extend_8(src2): unsigned shift pair of 24 on an int.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign_extend_16(src2): shift pair of 48 on a long.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign_extend_32(src2): shift pair of 32 on a long.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign_extend_8(src2): shift pair of 56 on a long.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + zero_extend_8(src2): unsigned shift pair of 56 on a long.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12458 
12459 
// Masking with 0xff / 0xffff / 0xffffffff is a zero-extension; these
// rules fold the AND into the uxtb/uxth/uxtw extended-register form of
// ADD/ADDW.

// dst = src1 + (src2 & 0xff) via uxtb extend.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff) via uxth extend.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: dst = src1 + (src2 & 0xff) via uxtb extend.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: dst = src1 + (src2 & 0xffff) via uxth extend.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: dst = src1 + (src2 & 0xffffffff) via uxtw extend.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12524 
12525 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
12526 %{
12527   match(Set dst (SubI src1 (AndI src2 mask)));
12528   ins_cost(INSN_COST);
12529   format %{ "subw  $dst, $src1, $src2, uxtb" %}
12530 
12531    ins_encode %{
12532      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12533             as_Register($src2$$reg), ext::uxtb);
12534    %}
12535   ins_pipe(ialu_reg_reg);
12536 %}
12537 
12538 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12539 %{
12540   match(Set dst (SubI src1 (AndI src2 mask)));
12541   ins_cost(INSN_COST);
12542   format %{ "subw  $dst, $src1, $src2, uxth" %}
12543 
12544    ins_encode %{
12545      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12546             as_Register($src2$$reg), ext::uxth);
12547    %}
12548   ins_pipe(ialu_reg_reg);
12549 %}
12550 
12551 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12552 %{
12553   match(Set dst (SubL src1 (AndL src2 mask)));
12554   ins_cost(INSN_COST);
12555   format %{ "sub  $dst, $src1, $src2, uxtb" %}
12556 
12557    ins_encode %{
12558      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12559             as_Register($src2$$reg), ext::uxtb);
12560    %}
12561   ins_pipe(ialu_reg_reg);
12562 %}
12563 
12564 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12565 %{
12566   match(Set dst (SubL src1 (AndL src2 mask)));
12567   ins_cost(INSN_COST);
12568   format %{ "sub  $dst, $src1, $src2, uxth" %}
12569 
12570    ins_encode %{
12571      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12572             as_Register($src2$$reg), ext::uxth);
12573    %}
12574   ins_pipe(ialu_reg_reg);
12575 %}
12576 
12577 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12578 %{
12579   match(Set dst (SubL src1 (AndL src2 mask)));
12580   ins_cost(INSN_COST);
12581   format %{ "sub  $dst, $src1, $src2, uxtw" %}
12582 
12583    ins_encode %{
12584      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12585             as_Register($src2$$reg), ext::uxtw);
12586    %}
12587   ins_pipe(ialu_reg_reg);
12588 %}
12589 
12590 // END This section of the file is automatically generated. Do not edit --------------
12591 
12592 // ============================================================================
12593 // Floating Point Arithmetic Instructions
12594 
// Single-precision float add: AddF ideal node -> "fadds dst, src1, src2".
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12609 
// Double-precision float add: AddD ideal node -> "faddd dst, src1, src2".
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12624 
// Single-precision float subtract: SubF -> "fsubs dst, src1, src2".
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12639 
// Double-precision float subtract: SubD -> "fsubd dst, src1, src2".
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12654 
// Single-precision float multiply: MulF -> "fmuls dst, src1, src2".
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12669 
// Double-precision float multiply: MulD -> "fmuld dst, src1, src2".
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12684 
12685 // We cannot use these fused mul w add/sub ops because they don't
12686 // produce the same result as the equivalent separated ops
12687 // (essentially they don't round the intermediate result). that's a
// shame. Leaving them here in case we can identify cases where it is
12689 // legitimate to use them
12690 
12691 
12692 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12693 //   match(Set dst (AddF (MulF src1 src2) src3));
12694 
12695 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12696 
12697 //   ins_encode %{
12698 //     __ fmadds(as_FloatRegister($dst$$reg),
12699 //              as_FloatRegister($src1$$reg),
12700 //              as_FloatRegister($src2$$reg),
12701 //              as_FloatRegister($src3$$reg));
12702 //   %}
12703 
12704 //   ins_pipe(pipe_class_default);
12705 // %}
12706 
12707 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12708 //   match(Set dst (AddD (MulD src1 src2) src3));
12709 
12710 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12711 
12712 //   ins_encode %{
12713 //     __ fmaddd(as_FloatRegister($dst$$reg),
12714 //              as_FloatRegister($src1$$reg),
12715 //              as_FloatRegister($src2$$reg),
12716 //              as_FloatRegister($src3$$reg));
12717 //   %}
12718 
12719 //   ins_pipe(pipe_class_default);
12720 // %}
12721 
12722 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12723 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12724 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12725 
12726 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12727 
12728 //   ins_encode %{
12729 //     __ fmsubs(as_FloatRegister($dst$$reg),
12730 //               as_FloatRegister($src1$$reg),
12731 //               as_FloatRegister($src2$$reg),
12732 //              as_FloatRegister($src3$$reg));
12733 //   %}
12734 
12735 //   ins_pipe(pipe_class_default);
12736 // %}
12737 
12738 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12739 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12740 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12741 
12742 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12743 
12744 //   ins_encode %{
12745 //     __ fmsubd(as_FloatRegister($dst$$reg),
12746 //               as_FloatRegister($src1$$reg),
12747 //               as_FloatRegister($src2$$reg),
12748 //               as_FloatRegister($src3$$reg));
12749 //   %}
12750 
12751 //   ins_pipe(pipe_class_default);
12752 // %}
12753 
12754 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12755 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12756 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12757 
12758 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12759 
12760 //   ins_encode %{
12761 //     __ fnmadds(as_FloatRegister($dst$$reg),
12762 //                as_FloatRegister($src1$$reg),
12763 //                as_FloatRegister($src2$$reg),
12764 //                as_FloatRegister($src3$$reg));
12765 //   %}
12766 
12767 //   ins_pipe(pipe_class_default);
12768 // %}
12769 
12770 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12771 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12772 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12773 
12774 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12775 
12776 //   ins_encode %{
12777 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12778 //                as_FloatRegister($src1$$reg),
12779 //                as_FloatRegister($src2$$reg),
12780 //                as_FloatRegister($src3$$reg));
12781 //   %}
12782 
12783 //   ins_pipe(pipe_class_default);
12784 // %}
12785 
12786 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12787 //   match(Set dst (SubF (MulF src1 src2) src3));
12788 
12789 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12790 
12791 //   ins_encode %{
12792 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12793 //                as_FloatRegister($src1$$reg),
12794 //                as_FloatRegister($src2$$reg),
12795 //                as_FloatRegister($src3$$reg));
12796 //   %}
12797 
12798 //   ins_pipe(pipe_class_default);
12799 // %}
12800 
12801 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12802 //   match(Set dst (SubD (MulD src1 src2) src3));
12803 
12804 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12805 
12806 //   ins_encode %{
12807 //   // n.b. insn name should be fnmsubd
12808 //     __ fnmsub(as_FloatRegister($dst$$reg),
12809 //                as_FloatRegister($src1$$reg),
12810 //                as_FloatRegister($src2$$reg),
12811 //                as_FloatRegister($src3$$reg));
12812 //   %}
12813 
12814 //   ins_pipe(pipe_class_default);
12815 // %}
12816 
12817 
// Single-precision float divide: DivF -> "fdivs". Higher ins_cost (18x)
// reflects divide latency for the scheduler.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12832 
// Double-precision float divide: DivD -> "fdivd". Higher ins_cost (32x)
// reflects divide latency for the scheduler.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12847 
// Single-precision float negate: NegF -> fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to name the instruction actually emitted (fnegs),
  // consistent with negD_reg_reg's "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12861 
// Double-precision float negate: NegD -> "fnegd dst, src".
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12875 
// Single-precision absolute value: AbsF -> "fabss dst, src".
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12888 
// Double-precision absolute value: AbsD -> "fabsd dst, src".
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12901 
// Double-precision square root: SqrtD -> "fsqrtd dst, src".
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed: this is a double-precision op; it previously used
  // fp_div_s (pipe classes were swapped with sqrtF_reg). Scheduling only,
  // no change to emitted code.
  ins_pipe(fp_div_d);
%}
12914 
// Single-precision square root, matched via the double-rounded form
// ConvD2F(SqrtD(ConvF2D src)) and strength-reduced to a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed: this is a single-precision op; it previously used
  // fp_div_d (pipe classes were swapped with sqrtD_reg). Scheduling only,
  // no change to emitted code.
  ins_pipe(fp_div_s);
%}
12927 
12928 // ============================================================================
12929 // Logical Instructions
12930 
12931 // Integer Logical Instructions
12932 
12933 // And Instructions
12934 
12935 
// Int bitwise AND, register-register: AndI -> "andw".
// NOTE(review): rFlagsReg cr is listed with no effect() clause — presumably
// intentional matcher bookkeeping; confirm against adlc conventions.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12950 
// Int bitwise AND with a logical immediate (immILog: presumably restricted
// to AArch64-encodable bitmask immediates).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed: the encoding emits the non-flag-setting "andw", not the
  // flag-setting "andsw" the format previously claimed.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12965 
12966 // Or Instructions
12967 
// Int bitwise OR, register-register: OrI -> "orrw".
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12982 
// Int bitwise OR with a logical immediate: OrI -> "orrw".
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12997 
12998 // Xor Instructions
12999 
// Int bitwise XOR, register-register: XorI -> "eorw".
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13014 
// Int bitwise XOR with a logical immediate: XorI -> "eorw".
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13029 
13030 // Long Logical Instructions
13031 // TODO
13032 
// Long bitwise AND, register-register: AndL -> 64-bit "and" (andr in the
// assembler API). NOTE(review): the "# int" in the format string looks like
// a copy-paste from the int variant; operands are long.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13047 
// Long bitwise AND with a logical immediate: AndL -> 64-bit "and".
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13062 
13063 // Or Instructions
13064 
// Long bitwise OR, register-register: OrL -> 64-bit "orr".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13079 
// Long bitwise OR with a logical immediate: OrL -> 64-bit "orr".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13094 
13095 // Xor Instructions
13096 
// Long bitwise XOR, register-register: XorL -> 64-bit "eor".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13111 
// Long bitwise XOR with a logical immediate: XorL -> 64-bit "eor".
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13126 
// int -> long sign extension: sbfm dst, src, 0, 31 (the sxtw alias).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13138 
13139 // this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (ConvI2L src) & 0xFFFFFFFF collapses to a single
// zero-extension, ubfm dst, src, 0, 31.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13152 
// long -> int truncation: 32-bit movw copies the low word (upper bits of a
// w-register write are zeroed; only the low 32 bits matter for L2I).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13165 
// int -> boolean: dst = (src != 0) via compare-with-zero then cset.
// Clobbers the condition flags (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13183 
// pointer -> boolean: dst = (src != null) via 64-bit compare-with-zero then
// cset. Clobbers the condition flags (KILL cr).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13201 
// double -> float narrowing conversion: fcvtd (double-source fcvt).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
13214 
// float -> double widening conversion: fcvts (single-source fcvt).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
13227 
// float -> int: fcvtzsw (signed convert, round toward zero, 32-bit dest).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
13240 
// float -> long: fcvtzs (signed convert, round toward zero, 64-bit dest).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
13253 
// int -> float: scvtfws (signed 32-bit source to single-precision).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
13266 
// long -> float: scvtfs (signed 64-bit source to single-precision).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
13279 
// double -> int: fcvtzdw (signed convert, round toward zero, 32-bit dest).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
13292 
// double -> long: fcvtzd (signed convert, round toward zero, 64-bit dest).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
13305 
// int -> double: scvtfwd (signed 32-bit source to double-precision).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
13318 
// long -> double: scvtfd (signed 64-bit source to double-precision).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13331 
13332 // stack <-> reg and reg <-> reg shuffles with no conversion
13333 
// Raw bit move float-slot -> int register (no conversion): 32-bit load from
// the stack slot at sp + disp.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13351 
// Raw bit move int-slot -> float register (no conversion): 32-bit FP load
// from the stack slot at sp + disp.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13369 
// Raw bit move double-slot -> long register (no conversion): 64-bit load
// from the stack slot at sp + disp.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13387 
// Raw bit move long-slot -> double register (no conversion): 64-bit FP load
// from the stack slot at sp + disp.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13405 
// Raw bit move float register -> int-slot (no conversion): 32-bit FP store
// to the stack slot at sp + disp.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13423 
// Raw bit move int register -> float-slot (no conversion): 32-bit store to
// the stack slot at sp + disp.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13441 
// Raw bit move double register -> long-slot (no conversion): 64-bit FP
// store to the stack slot at sp + disp.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format fixed: operand order was "$dst, $src"; the encoding stores $src
  // into slot $dst, and every sibling reg->stack pattern prints "$src, $dst".
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13459 
// Raw bit move long register -> double-slot (no conversion): 64-bit store
// to the stack slot at sp + disp.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13477 
// Raw bit move float register -> int register: fmovs (no memory traffic,
// no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
13495 
// Raw bit move int register -> float register: fmovs (no memory traffic,
// no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
13513 
// Raw bit move double register -> long register: fmovd (no memory traffic,
// no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
13531 
// Raw bit move long register -> double register: fmovd (no memory traffic,
// no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13549 
13550 // ============================================================================
13551 // clearing of an array
13552 
// Zero an array: count of words in r11 (cnt), base address in r10; both
// input registers are clobbered (USE_KILL).
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
13567 
// Zero an array with a constant word count. r11 is reserved as TEMP tmp but
// is never referenced here — presumably zero_words(base, const) clobbers it
// internally; confirm against the macro assembler.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, TEMP tmp);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13582 
13583 // ============================================================================
13584 // Overflow Math Instructions
13585 
// Overflow check for int add: cmnw sets the condition flags from
// op1 + op2 without writing a result; the consumer tests the flags.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
13598 
13599 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13600 %{
13601   match(Set cr (OverflowAddI op1 op2));
13602 
13603   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13604   ins_cost(INSN_COST);
13605   ins_encode %{
13606     __ cmnw($op1$$Register, $op2$$constant);
13607   %}
13608 
13609   ins_pipe(icmp_reg_imm);
13610 %}
13611 
13612 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13613 %{
13614   match(Set cr (OverflowAddL op1 op2));
13615 
13616   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13617   ins_cost(INSN_COST);
13618   ins_encode %{
13619     __ cmn($op1$$Register, $op2$$Register);
13620   %}
13621 
13622   ins_pipe(icmp_reg_reg);
13623 %}
13624 
13625 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13626 %{
13627   match(Set cr (OverflowAddL op1 op2));
13628 
13629   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13630   ins_cost(INSN_COST);
13631   ins_encode %{
13632     __ cmn($op1$$Register, $op2$$constant);
13633   %}
13634 
13635   ins_pipe(icmp_reg_imm);
13636 %}
13637 
13638 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13639 %{
13640   match(Set cr (OverflowSubI op1 op2));
13641 
13642   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13643   ins_cost(INSN_COST);
13644   ins_encode %{
13645     __ cmpw($op1$$Register, $op2$$Register);
13646   %}
13647 
13648   ins_pipe(icmp_reg_reg);
13649 %}
13650 
13651 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13652 %{
13653   match(Set cr (OverflowSubI op1 op2));
13654 
13655   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13656   ins_cost(INSN_COST);
13657   ins_encode %{
13658     __ cmpw($op1$$Register, $op2$$constant);
13659   %}
13660 
13661   ins_pipe(icmp_reg_imm);
13662 %}
13663 
13664 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13665 %{
13666   match(Set cr (OverflowSubL op1 op2));
13667 
13668   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13669   ins_cost(INSN_COST);
13670   ins_encode %{
13671     __ cmp($op1$$Register, $op2$$Register);
13672   %}
13673 
13674   ins_pipe(icmp_reg_reg);
13675 %}
13676 
13677 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13678 %{
13679   match(Set cr (OverflowSubL op1 op2));
13680 
13681   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13682   ins_cost(INSN_COST);
13683   ins_encode %{
13684     __ cmp($op1$$Register, $op2$$constant);
13685   %}
13686 
13687   ins_pipe(icmp_reg_imm);
13688 %}
13689 
13690 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13691 %{
13692   match(Set cr (OverflowSubI zero op1));
13693 
13694   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13695   ins_cost(INSN_COST);
13696   ins_encode %{
13697     __ cmpw(zr, $op1$$Register);
13698   %}
13699 
13700   ins_pipe(icmp_reg_imm);
13701 %}
13702 
13703 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13704 %{
13705   match(Set cr (OverflowSubL zero op1));
13706 
13707   format %{ "cmp   zr, $op1\t# overflow check long" %}
13708   ins_cost(INSN_COST);
13709   ins_encode %{
13710     __ cmp(zr, $op1$$Register);
13711   %}
13712 
13713   ins_pipe(icmp_reg_imm);
13714 %}
13715 
// Multiply-overflow check (int): compute the full 64-bit product, then
// overflow iff the product differs from the sign-extension of its own low
// 32 bits.  The NE/EQ outcome is then re-materialized into the V flag
// (0x80000000 - 1 sets VS) so the generic overflow cmpOp can consume cr.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the only consumer is an If on overflow/no_overflow:
// skip the flag re-materialization and branch directly on NE/EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE, VC to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Multiply-overflow check (long): 128-bit product via mul/smulh; overflow
// iff the high 64 bits are not the pure sign-extension of the low 64.
// NOTE(review): the ASR shift amount is 31 here (matches the upstream
// source) although the sign bit of a 64-bit value is bit 63 — verify
// against the original algorithm before changing.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused long multiply-overflow-and-branch, analogous to the int form.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13805 
13806 // ============================================================================
13807 // Compare Instructions
13808 
// Signed int compare, register-register: sets flags via cmpw.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate (1 insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costs more because
// the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13864 
13865 // Unsigned compare Instructions; really, same as signed compare
13866 // except it should only be used to feed an If or a CMovI which takes a
13867 // cmpOpU.
13868 
// Unsigned int compare, register-register.  Same cmpw encoding as the
// signed form; only the flags register class (rFlagsRegU) differs so the
// consumer must use an unsigned cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (constant may need
// materializing, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13924 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against constant zero.
// NOTE(review): zero is typed immI0 (int 0) against a CmpL, and the format
// string reads "tst" while the encoding is cmp-with-immediate — confirm
// both are intentional.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may need
// materializing, hence the doubled cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13980 
// Pointer compare, register-register.  Pointers compare unsigned, hence
// the rFlagsRegU flags class.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare op1 against 0.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test: compare op1 against 0.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14036 
14037 // FP comparisons
14038 //
14039 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14040 // using normal cmpOp. See declaration of rFlagsReg for details.
14041 
// Single-precision FP compare, register-register: fcmps sets the normal
// flags register (see the file's rFlagsReg commentary for FP flag use).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Single-precision FP compare against constant +0.0 using the
// fcmp-with-zero instruction form.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // NOTE(review): `0.0D` is a HotSpot-local literal spelling — confirm
    // the toolchain accepts it before reusing elsewhere.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
// NOTE(review): stale "FROM HERE" editing marker — candidate for removal.
14070 
// Double-precision FP compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision FP compare against constant +0.0 using the
// fcmp-with-zero instruction form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
14098 
// Three-way float compare: produce -1 / 0 / +1 in an int register
// (unordered compares as less-than, i.e. -1).  Implemented as
// fcmps; csinv gives 0 (EQ) else -1; csneg keeps -1 (LT/unordered)
// else negates to +1.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // `done` is bound but never branched to — harmless leftover.
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare; same -1/0/+1 scheme as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against constant +0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against constant +0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14206 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw materializes 0/1 from the
// LT condition, then subw from zr turns 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: the sign bit replicated by an arithmetic
// shift right 31 yields the same -1/0 mask in a single instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14243 
14244 // ============================================================================
14245 // Max and Min
14246 
// Signed int minimum: compare then conditional-select src1 when LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// NOTE(review): stale "FROM HERE" editing marker — candidate for removal.
14272 
// Signed int maximum: compare then conditional-select src1 when GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14297 
14298 // ============================================================================
14299 // Branch Instructions
14300 
14301 // Direct Branch.
// Direct Branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch: branch on a signed condition held in cr.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned: same shape as branchCon but the
// flags come from an unsigned compare (rFlagsRegU / cmpOpU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14357 
14358 // Make use of CBZ and CBNZ.  These instructions, as well as being
14359 // shorter than (cmp; branch), have the additional benefit of not
14360 // killing the flags.
14361 
// Compare-against-zero-and-branch via CBZ/CBNZ (int).  These fused forms
// are shorter than cmp+branch and leave the flags untouched.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit variant of the above (long).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check-and-branch (64-bit CBZ/CBNZ).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop null-check-and-branch (32-bit CBZ/CBNZ).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check on a DecodeN result: a narrow oop is null iff its encoded
// form is zero, so test the 32-bit register directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned compare against zero and branch: for unsigned x, x <= 0 is
// x == 0 (LS maps to cbz) and x > 0 is x != 0, so EQ/LS take the cbz
// path and the rest cbnz.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit variant of the unsigned zero-compare branch.
// NOTE(review): matches CmpU with iRegL/immL0 operands — confirm this is
// the intended ideal opcode (vs. a dedicated long unsigned compare).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14480 
14481 // Test bit and Branch
14482 
14483 // Patterns for short (< 32KiB) variants
// Sign test of a long via TBZ/TBNZ on bit 63: op1 < 0 iff the sign bit is
// set (LT -> tbnz/NE, GE -> tbz/EQ).  Short-branch variant (< 32KiB).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int via TBZ/TBNZ on bit 31; short-branch variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long: (op1 & (1<<k)) ==/!= 0 becomes TBZ/TBNZ on
// bit k; the predicate guarantees the mask is a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int via TBZ/TBNZ; short-branch variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14549 
14550 // And far variants
// Far variants of the tbz/tbnz instructs above: same matching, but the
// /*far*/true argument lets tbr synthesize an out-of-range branch.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far sign test of an int (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of a long (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of an int (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14612 
// Test bits
//
// The four rules below set the flag register from "(a & b) cmp 0"
// using tst/tstw, avoiding a separate AND result register.

// Long AND-with-immediate against zero; the predicate restricts the
// mask to values encodable as an AArch64 logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int AND-with-immediate against zero (32-bit logical immediate).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Long AND of two registers against zero.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int AND of two registers against zero.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14662 
14663 
14664 // Conditional Far Branch
14665 // Conditional Far Branch Unsigned
14666 // TODO: fixme
14667 
14668 // counted loop end branch near
14669 instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
14670 %{
14671   match(CountedLoopEnd cmp cr);
14672 
14673   effect(USE lbl);
14674 
14675   ins_cost(BRANCH_COST);
14676   // short variant.
14677   // ins_short_branch(1);
14678   format %{ "b$cmp $lbl \t// counted loop end" %}
14679 
14680   ins_encode(aarch64_enc_br_con(cmp, lbl));
14681 
14682   ins_pipe(pipe_branch);
14683 %}
14684 
14685 // counted loop end branch near Unsigned
14686 instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
14687 %{
14688   match(CountedLoopEnd cmp cr);
14689 
14690   effect(USE lbl);
14691 
14692   ins_cost(BRANCH_COST);
14693   // short variant.
14694   // ins_short_branch(1);
14695   format %{ "b$cmp $lbl \t// counted loop end unsigned" %}
14696 
14697   ins_encode(aarch64_enc_br_conU(cmp, lbl));
14698 
14699   ins_pipe(pipe_branch);
14700 %}
14701 
14702 // counted loop end branch far
14703 // counted loop end branch far unsigned
14704 // TODO: fixme
14705 
14706 // ============================================================================
14707 // inlined locking and unlocking
14708 
14709 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
14710 %{
14711   match(Set cr (FastLock object box));
14712   effect(TEMP tmp, TEMP tmp2);
14713 
14714   // TODO
14715   // identify correct cost
14716   ins_cost(5 * INSN_COST);
14717   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
14718 
14719   ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
14720 
14721   ins_pipe(pipe_serial);
14722 %}
14723 
14724 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
14725 %{
14726   match(Set cr (FastUnlock object box));
14727   effect(TEMP tmp, TEMP tmp2);
14728 
14729   ins_cost(5 * INSN_COST);
14730   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
14731 
14732   ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
14733 
14734   ins_pipe(pipe_serial);
14735 %}
14736 
14737 
14738 // ============================================================================
14739 // Safepoint Instructions
14740 
14741 // TODO
14742 // provide a near and far version of this code
14743 
14744 instruct safePoint(iRegP poll)
14745 %{
14746   match(SafePoint poll);
14747 
14748   format %{
14749     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
14750   %}
14751   ins_encode %{
14752     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
14753   %}
14754   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
14755 %}
14756 
14757 
14758 // ============================================================================
14759 // Procedure Call/Return Instructions
14760 
14761 // Call Java Static Instruction
14762 
14763 instruct CallStaticJavaDirect(method meth)
14764 %{
14765   match(CallStaticJava);
14766 
14767   effect(USE meth);
14768 
14769   ins_cost(CALL_COST);
14770 
14771   format %{ "call,static $meth \t// ==> " %}
14772 
14773   ins_encode( aarch64_enc_java_static_call(meth),
14774               aarch64_enc_call_epilog );
14775 
14776   ins_pipe(pipe_class_call);
14777 %}
14778 
14779 // TO HERE
14780 
// Call Java Dynamic Instruction
// Dynamically dispatched Java call (e.g. invokevirtual/invokeinterface
// through the inline cache); see aarch64_enc_java_dynamic_call.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14797 
// Call Runtime Instruction
// Call into the VM runtime (may include a safepoint transition;
// sequence defined by aarch64_enc_java_to_runtime).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14814 
// Call Runtime Instruction
// Leaf runtime call (no Java-visible side effects); shares the
// java_to_runtime encoding with CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14831 
// Call Runtime Instruction
// Leaf runtime call that does not use/preserve FP state (CallLeafNoFP
// ideal node); same encoding as the other runtime calls.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14848 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Interprocedural jump carrying an exception oop (in r0, per the
// iRegP_R0 operand); unlike TailCall, control does not return here.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14878 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  // size(0): purely a register-allocation artifact, zero bytes of code.
  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14896 
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14909 
14910 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14923 
// Die now.
// Matches the Halt ideal node; emits a breakpoint (brk #999) so that
// falling into supposedly unreachable code traps immediately.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14939 
14940 // ============================================================================
14941 // Partial Subtype Check
14942 //
// Search the subklass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).
// Return NZ for a miss or zero for a hit.  The encoding ALSO sets
// flags.
14947 
// Full form: produces the check result in a register (zero on a hit,
// see the opcode comment below) and also sets flags.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Flags-only form: matches the comparison of the check result against
// zero directly, so callers branching on the outcome skip the
// materialized result (result/temp are still clobbered).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14977 
// String comparison intrinsics.  One rule per StrIntrinsicNode
// encoding (U = UTF-16, L = Latin-1 on each side); all delegate to
// MacroAssembler::string_compare.  The mixed-width UL/LU forms need
// two FP temps (vtmp1/vtmp2) for inflation; same-width forms pass
// fnoreg.

// UTF-16 vs UTF-16.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 vs Latin-1.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 vs Latin-1 (mixed widths: FP temps used).
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 vs UTF-16 (mixed widths: FP temps used).
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15046 
// String.indexOf intrinsics with a variable-length needle (cnt2 in a
// register); the -1 passed to MacroAssembler::string_indexof marks the
// needle length as not known at compile time.  One rule per
// source/needle encoding pair.

instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexofLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15122 
// String.indexOf intrinsics with a compile-time-constant needle length
// (immI operand, so cnt2 needs no register and zr is passed in its
// place).  Same-width forms accept lengths <= 4 (immI_le_4); the
// mixed-width forms only handle a single-char needle (immI_1).

instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexof_conLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15206 
// String.equals intrinsics; both delegate to
// MacroAssembler::arrays_equals with is_string=true.

// Latin-1 strings: cnt is already an element (byte) count.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     1, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 strings: cnt arrives as a byte count, so it is halved (asrw
// by 1) to get the 16-bit char count before the element-wise compare.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
15241 
// Arrays.equals intrinsic for byte arrays (1-byte elements); delegates
// to MacroAssembler::arrays_equals with is_string=false.  Clobbers
// tmp, both array pointers, and the flags.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed format string: "$ary2" was written as plain "ary2", so the
  // second operand was never substituted into the disassembly output.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15257 
// Arrays.equals intrinsic for char arrays (2-byte elements); delegates
// to MacroAssembler::arrays_equals with is_string=false.  Clobbers
// tmp, both array pointers, and the flags.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed format string: "$ary2" was written as plain "ary2", so the
  // second operand was never substituted into the disassembly output.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15273 
15274 
// fast char[] to byte[] compression
// Compact Strings support: narrows UTF-16 chars to Latin-1 bytes via
// MacroAssembler::char_array_compress using four FP temps (V0-V3).
// NOTE(review): the format comment lists "R1, R2, R3, R4" but the
// effect() clause kills src(R2)/dst(R1)/len(R3) and writes R0 —
// confirm whether R4 is actually clobbered by the encoding.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15293 
// fast byte[] to char[] inflation
// Compact Strings support: widens Latin-1 bytes to UTF-16 chars via
// MacroAssembler::byte_array_inflate.  "Universe dummy" gives the
// matcher a result operand for the value-less StrInflatedCopy node.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15308 
// encode char[] to byte[] in ISO_8859_1
// Delegates to MacroAssembler::encode_iso_array with four FP temps
// (V0-V3); result receives the number of encoded characters.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15327 
15328 // ============================================================================
15329 // This name is KNOWN by the ADLC and cannot be changed.
15330 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15331 // for this guy.
15332 instruct tlsLoadP(thread_RegP dst)
15333 %{
15334   match(Set dst (ThreadLocal));
15335 
15336   ins_cost(0);
15337 
15338   format %{ " -- \t// $dst=Thread::current(), empty" %}
15339 
15340   size(0);
15341 
15342   ins_encode( /*empty*/ );
15343 
15344   ins_pipe(pipe_class_empty);
15345 %}
15346 
// ====================VECTOR INSTRUCTIONS=====================================

// SIMD vector loads/stores.  Each rule is selected on the vector's
// memory footprint (4/8/16 bytes) and maps to a single ldr/str of the
// corresponding FP/SIMD register width (S/D/Q).

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15414 
// Replicate a GP-register byte into all lanes of a 64-bit vector.
// The length == 4 case is also matched here: a 4B vector simply
// occupies the low half of the D register.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate a GP-register byte into all 16 lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate byte (constant masked to 8 bits) across a
// 64-bit vector via movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate byte across a 128-bit vector via movi.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15464 
// Replicate a GP-register short into the 16-bit lanes of a 64-bit
// vector; length == 2 (2S packed in the low half) shares this rule.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate a GP-register short into all 8 H lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate short (constant masked to 16 bits) across a
// 64-bit vector via movi.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate short across a 128-bit vector via movi.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15514 
// Replicate a GP-register int into both S lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate a GP-register int into all 4 S lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate int across a 64-bit vector. No masking here —
// the constant is passed through to the movi encoding unchanged.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate int across a 128-bit vector via movi.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15562 
// Replicate a GP-register long into both D lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15574 
// Replicate zero into a 128-bit vector. Implemented as dst = dst EOR
// dst, which zeroes all 128 bits regardless of lane size — cheaper
// than materializing the constant.
// NOTE(review): this matches (ReplicateI zero) with an immI0 operand
// even though the rule is named for 2L — presumably the matcher
// canonicalizes the zero-long replicate this way; confirm against the
// ideal-graph shape before relying on it.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // Format now reflects the instruction actually emitted (eor), not a
  // movi — the previous text made the debug disassembly misleading.
  format %{ "eor  $dst, $dst, $dst\t# replicate zero (2L/4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15588 
// Replicate an FP-register float into both S lanes of a 64-bit vector.
// Source is an FP register, so this uses the element-dup form.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Replicate an FP-register float into all 4 S lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Replicate an FP-register double into both D lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15627 
15628 // ====================REDUCTION ARITHMETIC====================================
15629 
// Add-reduction of a 2-lane int vector: dst = src1 + src2[0] + src2[1].
// Both lanes are extracted to GP registers (umov) and summed there.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduction of a 4-lane int vector: uses the SIMD across-lanes
// addv to sum the vector, then moves the scalar result to a GP
// register and adds the accumulator src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15666 
// Multiply-reduction of a 2-lane int vector:
// dst = src1 * src2[0] * src2[1].
// There is no integer multiply-across-lanes instruction, so each lane
// is extracted to a GP register (umov) and multiplied there.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the format string previously ended in "\n\t", producing a
  // spurious blank continuation line in the debug disassembly.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15685 
// Multiply-reduction of a 4-lane int vector. Folds the high half onto
// the low half first (ins of D lane 1 into tmp, then a 2S mulv), then
// extracts the two remaining partial products and finishes in GP
// registers: dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Fixed: the format string previously ended in "\n\t", producing a
  // spurious blank continuation line in the debug disassembly.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15710 
// Add-reduction of a 2-lane float vector, performed with strictly
// ordered scalar fadds so the result matches sequential Java
// semantics: dst = (src1 + src2[0]) + src2[1]. Each high lane is
// brought to lane 0 of tmp with ins before being added.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduction of a 4-lane float vector; same ordered-scalar scheme
// extended to lanes 1..3.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15762 
// Multiply-reduction of a 2-lane float vector, with strictly ordered
// scalar fmuls: dst = (src1 * src2[0]) * src2[1].
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: trailing format tag said "add reduction4f" — this is a
  // 2-lane *multiply* reduction, so the debug output was wrong.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15782 
// Multiply-reduction of a 4-lane float vector, with strictly ordered
// scalar fmuls over lanes 0..3.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: trailing format tag said "add reduction4f" — this is a
  // *multiply* reduction, so the debug output was wrong.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15814 
// Add-reduction of a 2-lane double vector, with strictly ordered
// scalar faddd: dst = (src1 + src2[0]) + src2[1].
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15834 
// Multiply-reduction of a 2-lane double vector, with strictly ordered
// scalar fmuld: dst = (src1 * src2[0]) * src2[1].
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: trailing format tag said "add reduction2d" — this is a
  // *multiply* reduction, so the debug output was wrong.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15854 
15855 // ====================VECTOR ARITHMETIC=======================================
15856 
15857 // --------------------------------- ADD --------------------------------------
15858 
// Byte-wise vector add, 64-bit form (also covers 4B vectors packed in
// the low half of the D register).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Byte-wise vector add, 128-bit form.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Short-wise vector add, 64-bit form (also covers 2S vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Short-wise vector add, 128-bit form.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Int-wise vector add, 64-bit form.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Int-wise vector add, 128-bit form.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Long-wise vector add, 128-bit form.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15958 
// Float vector add, 64-bit (2-lane) form.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Float vector add, 128-bit (4-lane) form.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15986 
// Double vector add, 128-bit (2-lane) form.
// Added the length == 2 predicate for consistency with the sibling
// 2D rules (vsub2D, vmul2D); 2D is the only AddVD arity supported
// here, so this does not change which nodes can match.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15999 
16000 // --------------------------------- SUB --------------------------------------
16001 
// Byte-wise vector subtract, 64-bit form (also covers 4B vectors).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Byte-wise vector subtract, 128-bit form.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Short-wise vector subtract, 64-bit form (also covers 2S vectors).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Short-wise vector subtract, 128-bit form.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Int-wise vector subtract, 64-bit form.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Int-wise vector subtract, 128-bit form.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Long-wise vector subtract, 128-bit form.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Float vector subtract, 64-bit (2-lane) form.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Float vector subtract, 128-bit (4-lane) form.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Double vector subtract, 128-bit (2-lane) form.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16143 
16144 // --------------------------------- MUL --------------------------------------
16145 
// Short-wise vector multiply, 64-bit form (also covers 2S vectors).
// Note: no MulVB rules here — byte vector multiply is not matched.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Short-wise vector multiply, 128-bit form.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Int-wise vector multiply, 64-bit form.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Int-wise vector multiply, 128-bit form.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Float vector multiply, 64-bit (2-lane) form.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Float vector multiply, 128-bit (4-lane) form.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Double vector multiply, 128-bit (2-lane) form.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16244 
16245 // --------------------------------- MLA --------------------------------------
16246 
// Fused multiply-accumulate: matches dst = dst + (src1 * src2) and
// emits a single mla, with dst as both accumulator input and output.
// 64-bit short form (also covers 2S vectors).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate, 128-bit short form.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-accumulate, 64-bit int form.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate, 128-bit int form.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16303 
16304 // --------------------------------- MLS --------------------------------------
16305 
// Fused multiply-subtract: matches dst = dst - (src1 * src2) and
// emits a single mls, with dst as both accumulator input and output.
// 64-bit short form (also covers 2S vectors).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract, 128-bit short form.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-subtract, 64-bit int form.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract, 128-bit int form.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16362 
16363 // --------------------------------- DIV --------------------------------------
16364 
// Vector floating-point divide: dst = src1 / src2.

// Two single-precision floats in a 64-bit register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Four single-precision floats in a 128-bit register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Two double-precision floats in a 128-bit register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16406 
16407 // --------------------------------- SQRT -------------------------------------
16408 
// Vector square root of two double-precision floats (128-bit).
// Only the 2D form exists; SqrtVF is not matched here.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16420 
16421 // --------------------------------- ABS --------------------------------------
16422 
// Vector floating-point absolute value: dst = |src|.

// Two single-precision floats in a 64-bit register.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Four single-precision floats in a 128-bit register.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Two double-precision floats in a 128-bit register.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16461 
16462 // --------------------------------- NEG --------------------------------------
16463 
// Vector floating-point negate: dst = -src.

// Two single-precision floats in a 64-bit register.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Four single-precision floats in a 128-bit register.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Two double-precision floats in a 128-bit register.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16502 
16503 // --------------------------------- AND --------------------------------------
16504 
// Bitwise AND of vectors. Logical ops are element-size agnostic, so the
// predicate tests length_in_bytes rather than element count; 4- and 8-byte
// vectors both use the 64-bit (T8B) encoding.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    // andr is the assembler's name for the vector AND instruction.
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16-byte (128-bit) AND.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16533 
16534 // --------------------------------- OR ---------------------------------------
16535 
// Bitwise OR of vectors: dst = src1 | src2. The predicate accepts 4- and
// 8-byte vectors; both use the 64-bit (T8B) encoding.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fix: the format string previously read "and", but this rule emits an
  // orr instruction (the 16B variant below already prints "orr").
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16550 
// 16-byte (128-bit) bitwise OR: dst = src1 | src2.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16564 
16565 // --------------------------------- XOR --------------------------------------
16566 
// Bitwise XOR of vectors: dst = src1 ^ src2. The emitted instruction is
// EOR; the format string prints the ideal-op name "xor".
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16-byte (128-bit) XOR.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16595 
16596 // ------------------------------ Shift ---------------------------------------
16597 
// Left-shift count: broadcast the GP-register count into every byte lane
// of a 128-bit vector, for use by the variable-shift rules below.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// hence the right-shift count is broadcast and then negated lane-wise.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16617 
// Variable byte shifts. Both LShiftVB and RShiftVB map to sshl because the
// shift-count vector was negated by vshiftcntR for right shifts (see the
// note above vshiftcntR); sshl with a negative count is an arithmetic
// right shift, ushl with a negative count is a logical right shift.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16-byte variable left/arithmetic-right shift.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical (unsigned) right shift of bytes, via ushl with the
// negated count.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16-byte variable logical right shift.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16673 
// Immediate byte shifts. The constant is masked to 5 bits (matching Java's
// int shift-count masking). A left-shift count >= 8 moves every bit out of
// an 8-bit lane, so the destination is zeroed with eor dst,src,src.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeroes (x ^ x == 0).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16-byte immediate left shift; same zeroing trick for counts >= 8.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of bytes. An arithmetic shift cannot
// lose the sign, so counts >= 8 are clamped to 7 (fills lane with sign
// bits). NOTE(review): the count is passed in negated-and-masked form
// (-sh & 7), the same convention used for ushr below — confirm against
// the sshr/ushr definitions in assembler_aarch64.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 16-byte immediate arithmetic right shift; same clamping as above.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16743 
// Immediate logical (unsigned) right shift of bytes. Counts >= 8 shift out
// every bit, so the destination is zeroed with eor dst,src,src; otherwise
// the count is passed in negated-and-masked form (-sh & 7), matching the
// sshr rules above.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16-byte immediate logical right shift.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16782 
// Variable 16-bit (halfword) shifts; same sshl/ushl negative-count scheme
// as the byte variants above.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Eight halfwords in a 128-bit register.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of halfwords (64-bit form).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right shift of halfwords (128-bit form).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16838 
// Immediate halfword shifts; mirrors the byte-immediate rules but with a
// 16-bit lane width (zero for left counts >= 16, clamp to 15 for
// arithmetic right).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shift >= lane width: zero the destination (x ^ x == 0).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Eight halfwords, immediate left shift.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of halfwords; count clamped to 15,
// then negated-and-masked for the assembler (same convention as bytes).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Eight halfwords, immediate arithmetic right shift.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16908 
// Immediate logical right shift of halfwords; counts >= 16 zero the
// destination, otherwise the count is negated-and-masked (-sh & 15).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Eight halfwords, immediate logical right shift.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16947 
// Variable 32-bit (word) shifts; same sshl/ushl negative-count scheme.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Four words in a 128-bit register.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of words (64-bit form).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right shift of words (128-bit form).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17001 
// Immediate word shifts. Lane width equals the 32-bit masked count range,
// so no clamping/zeroing branch is needed; right shifts pass the count in
// negated-and-masked form, matching the narrower lanes above.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// Four words, immediate left shift.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}

// Two words, immediate arithmetic right shift.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// Four words, immediate arithmetic right shift.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}

// Two words, immediate logical right shift.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// Four words, immediate logical right shift.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
17079 
// Variable 64-bit (doubleword) shifts; only the 128-bit (2D) form exists.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of doublewords.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17106 
// Immediate doubleword shifts; the count is masked to 6 bits (Java long
// shift-count masking). Right shifts pass the count negated-and-masked,
// matching the narrower lane widths above.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of doublewords.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift of doublewords.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
17145 
17146 //----------PEEPHOLE RULES-----------------------------------------------------
17147 // These must follow all instruction definitions as they use the names
17148 // defined in the instructions definitions.
17149 //
17150 // peepmatch ( root_instr_name [preceding_instruction]* );
17151 //
17152 // peepconstraint %{
17153 // (instruction_number.operand_name relational_op instruction_number.operand_name
17154 //  [, ...] );
17155 // // instruction numbers are zero-based using left to right order in peepmatch
17156 //
17157 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17158 // // provide an instruction_number.operand_name for each operand that appears
17159 // // in the replacement instruction's match rule
17160 //
17161 // ---------VM FLAGS---------------------------------------------------------
17162 //
17163 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17164 //
17165 // Each peephole rule is given an identifying number starting with zero and
17166 // increasing by one in the order seen by the parser.  An individual peephole
17167 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17168 // on the command-line.
17169 //
17170 // ---------CURRENT LIMITATIONS----------------------------------------------
17171 //
17172 // Only match adjacent instructions in same basic block
17173 // Only equality constraints
17174 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17175 // Only one replacement instruction
17176 //
17177 // ---------EXAMPLE----------------------------------------------------------
17178 //
17179 // // pertinent parts of existing instructions in architecture description
17180 // instruct movI(iRegINoSp dst, iRegI src)
17181 // %{
17182 //   match(Set dst (CopyI src));
17183 // %}
17184 //
17185 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17186 // %{
17187 //   match(Set dst (AddI dst src));
17188 //   effect(KILL cr);
17189 // %}
17190 //
17191 // // Change (inc mov) to lea
17192 // peephole %{
//   // increment preceded by register-register move
17194 //   peepmatch ( incI_iReg movI );
17195 //   // require that the destination register of the increment
17196 //   // match the destination register of the move
17197 //   peepconstraint ( 0.dst == 1.dst );
17198 //   // construct a replacement instruction that sets
17199 //   // the destination to ( move's source register + one )
17200 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17201 // %}
17202 //
17203 
17204 // Implementation no longer uses movX instructions since
17205 // machine-independent system no longer uses CopyX nodes.
17206 //
17207 // peephole
17208 // %{
17209 //   peepmatch (incI_iReg movI);
17210 //   peepconstraint (0.dst == 1.dst);
17211 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17212 // %}
17213 
17214 // peephole
17215 // %{
17216 //   peepmatch (decI_iReg movI);
17217 //   peepconstraint (0.dst == 1.dst);
17218 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17219 // %}
17220 
17221 // peephole
17222 // %{
17223 //   peepmatch (addI_iReg_imm movI);
17224 //   peepconstraint (0.dst == 1.dst);
17225 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17226 // %}
17227 
17228 // peephole
17229 // %{
17230 //   peepmatch (incL_iReg movL);
17231 //   peepconstraint (0.dst == 1.dst);
17232 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17233 // %}
17234 
17235 // peephole
17236 // %{
17237 //   peepmatch (decL_iReg movL);
17238 //   peepconstraint (0.dst == 1.dst);
17239 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17240 // %}
17241 
17242 // peephole
17243 // %{
17244 //   peepmatch (addL_iReg_imm movL);
17245 //   peepconstraint (0.dst == 1.dst);
17246 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17247 // %}
17248 
17249 // peephole
17250 // %{
17251 //   peepmatch (addP_iReg_imm movP);
17252 //   peepconstraint (0.dst == 1.dst);
17253 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17254 // %}
17255 
17256 // // Change load of spilled value to only a spill
17257 // instruct storeI(memory mem, iRegI src)
17258 // %{
17259 //   match(Set mem (StoreI mem src));
17260 // %}
17261 //
17262 // instruct loadI(iRegINoSp dst, memory mem)
17263 // %{
17264 //   match(Set dst (LoadI mem));
17265 // %}
17266 //
17267 
17268 //----------SMARTSPILL RULES---------------------------------------------------
17269 // These must follow all instruction definitions as they use the names
17270 // defined in the instructions definitions.
17271 
17272 // Local Variables:
17273 // mode: c++
17274 // End: