1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// as regards Java usage; we don't use any callee-save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// 64-bit integer registers, each modelled as a real low word (Rn) plus
// a virtual high word (Rn_H) so the allocator can track both halves
// (see the note above about 32-bit halves).
// r8 and r9 are deliberately absent from this list so they remain
// invisible to the allocator and usable as scratch registers.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r10-r18: volatile (caller-save) for Java use, per the C1 convention above
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: SOC for Java use but SOE under the C calling convention
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: reserved registers, never allocated for Java use (NS)
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can store a vector of single or double precision floating-point
// values: 4 * 32 bit floats or 2 * 64 bit doubles.  We currently only
// use the first float or double element of the vector.
 159 
// for Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save).  float registers
// v16-v31 are SOC as per the platform spec
 163 
  // SIMD/FP registers v0-v31. Each one is described to the allocator as
  // four 32-bit slices: Vn (low word) plus the virtual slices Vn_H, Vn_J
  // and Vn_K (VMReg next(), next(2), next(3)), so that float, double and
  // 128-bit vector values can all be tracked over the same register.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee-save under the platform ABI but are declared SOC
  // here for Java use (see the note above this section)
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16-v31 are caller-save under the platform ABI as well
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Pseudo register for the condition flags. Encoding 32 lies outside the
// general-register range and there is no backing VMReg (VMRegImpl::Bad())
// because the flags cannot be named as an instruction operand (see above).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the integer registers. Entries earlier in the
// list are preferred by the register allocator (see the priority note
// above); do not reorder without considering that heuristic.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers
    // (listed last; they are omitted from the allocatable reg_classes)

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the SIMD/FP registers; as with chunk0, earlier
// entries are preferred. v16-v31 come first since the platform ABI does
// not require them to be preserved across calls.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The condition-flags pseudo register gets its own allocation chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
// Only the low-word names are listed here (32-bit views of the 64-bit
// registers); R31 (sp) is omitted as noted above.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes: each pins a 32-bit operand to one specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
// Every 64-bit register pair, r0-r7 and r10-r31 — including the
// reserved heapbase, thread, fp, lr and sp registers.
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
 518 // Class for all non-special integer registers
// Excludes the reserved registers R27-R31; this is the variant in which
// R29 (fp) is also kept out of allocation (see no_special_reg32 below).
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// As no_special_reg32_no_fp but with R29 (fp) allocatable as an
// ordinary register (see no_special_reg32 below).
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
// Dynamic class: when PreserveFramePointer is set, R29 must stay
// reserved as the frame pointer, so the _no_fp variant is used;
// otherwise R29 is allocatable via the _with_fp variant.
// NOTE(review): assumes ADLC selects the first class when the condition
// is true — confirm against the adlc reg_class_dynamic semantics.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
// 64-bit counterpart of no_special_reg32_no_fp: reserved registers
// R27-R31 excluded and R29 (fp) kept out of allocation.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// 64-bit counterpart of no_special_reg32_with_fp: R29 (fp) allocatable,
// other reserved registers excluded.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
// Dynamic class mirroring no_special_reg32 for 64-bit values: the
// _no_fp variant when PreserveFramePointer is set, _with_fp otherwise.
// NOTE(review): confirm the first-class-on-true assumption as above.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit classes: each pins a long/pointer operand to one
// specific register pair (low word plus virtual high word).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12, "rmethod" in the classes above)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
 726 // Class for all pointer registers
// Every register pair, including the reserved heapbase, thread, fp,
// lr and sp registers.
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
 760 // Class for all non_special pointer registers
// As ptr_reg minus the reserved registers listed in the trailing
// commented-out entries.
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
 794 // Class for all float registers
// Single-precision floats only need the base 32-bit slice (Vn) of each
// SIMD/FP register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
// Each double occupies Vn plus its virtual high half Vn_H.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
 868 // Class for all 64bit vector registers
// 64-bit vectors use the two low 32-bit slices (Vn, Vn_H) of each register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// Four 32-bit slots per register (V<n>, V<n>_H, V<n>_J, V<n>_K) so the
// allocator can track the full 128-bit width of each V register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): the header says "128 bit" but only the V0/V0_H slots
// (64 bits worth) are listed, unlike vectorx_reg above which lists all
// four slots per register -- confirm the _J/_K slots are deliberately
// omitted for these singleton classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain
  // register op (see the cost-model comment above).
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are costed an order of magnitude higher
  // than plain register ops.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
// Platform hooks queried by the shared compiler code when laying out
// call sites. AArch64 (in this file) does not emit call trampoline
// stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
// Platform hooks for emitting and sizing the method-level exception
// and deoptimization handler stubs. The emit_* functions are defined
// in the source block below.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch to the shared stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): returns 4 instruction slots -- presumably 1 adr +
    // up to 3 instructions for the far branch; confirm against
    // MacroAssembler::far_jump.
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
  // graph traversal helpers
  // (definitions and detailed commentary are in the source block below)

  // return the membar linked above/below n via Ctl+Mem projections,
  // or NULL when no such membar exists
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  // true when barrier can start a volatile put/CAS signature
  bool leading_membar(const MemBarNode *barrier);

  // true when barrier is the StoreLoad membar of a GC card mark
  bool is_card_mark_membar(const MemBarNode *barrier);
  // true when opcode is one of the CompareAndSwap ideal opcodes
  bool is_CAS(int opcode);

  // navigate between the leading/trailing/card-mark membars of a
  // recognised volatile put or CAS subgraph
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1073 %}
1074 
1075 source %{
1076 
1077   // Optimizaton of volatile gets and puts
1078   // -------------------------------------
1079   //
1080   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1081   // use to implement volatile reads and writes. For a volatile read
1082   // we simply need
1083   //
1084   //   ldar<x>
1085   //
1086   // and for a volatile write we need
1087   //
1088   //   stlr<x>
1089   //
1090   // Alternatively, we can implement them by pairing a normal
1091   // load/store with a memory barrier. For a volatile read we need
1092   //
1093   //   ldr<x>
1094   //   dmb ishld
1095   //
1096   // for a volatile write
1097   //
1098   //   dmb ish
1099   //   str<x>
1100   //   dmb ish
1101   //
1102   // We can also use ldaxr and stlxr to implement compare and swap CAS
1103   // sequences. These are normally translated to an instruction
1104   // sequence like the following
1105   //
1106   //   dmb      ish
1107   // retry:
1108   //   ldxr<x>   rval raddr
1109   //   cmp       rval rold
1110   //   b.ne done
1111   //   stlxr<x>  rval, rnew, rold
1112   //   cbnz      rval retry
1113   // done:
1114   //   cset      r0, eq
1115   //   dmb ishld
1116   //
1117   // Note that the exclusive store is already using an stlxr
1118   // instruction. That is required to ensure visibility to other
1119   // threads of the exclusive write (assuming it succeeds) before that
1120   // of any subsequent writes.
1121   //
1122   // The following instruction sequence is an improvement on the above
1123   //
1124   // retry:
1125   //   ldaxr<x>  rval raddr
1126   //   cmp       rval rold
1127   //   b.ne done
1128   //   stlxr<x>  rval, rnew, rold
1129   //   cbnz      rval retry
1130   // done:
1131   //   cset      r0, eq
1132   //
1133   // We don't need the leading dmb ish since the stlxr guarantees
1134   // visibility of prior writes in the case that the swap is
1135   // successful. Crucially we don't have to worry about the case where
1136   // the swap is not successful since no valid program should be
1137   // relying on visibility of prior changes by the attempting thread
1138   // in the case where the CAS fails.
1139   //
1140   // Similarly, we don't need the trailing dmb ishld if we substitute
1141   // an ldaxr instruction since that will provide all the guarantees we
1142   // require regarding observation of changes made by other threads
1143   // before any change to the CAS address observed by the load.
1144   //
1145   // In order to generate the desired instruction sequence we need to
1146   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1148   // writes or CAS operations and ii) do not occur through any other
1149   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1151   // sequences to the desired machine code sequences. Selection of the
1152   // alternative rules can be implemented by predicates which identify
1153   // the relevant node sequences.
1154   //
1155   // The ideal graph generator translates a volatile read to the node
1156   // sequence
1157   //
1158   //   LoadX[mo_acquire]
1159   //   MemBarAcquire
1160   //
1161   // As a special case when using the compressed oops optimization we
1162   // may also see this variant
1163   //
1164   //   LoadN[mo_acquire]
1165   //   DecodeN
1166   //   MemBarAcquire
1167   //
1168   // A volatile write is translated to the node sequence
1169   //
1170   //   MemBarRelease
1171   //   StoreX[mo_release] {CardMark}-optional
1172   //   MemBarVolatile
1173   //
1174   // n.b. the above node patterns are generated with a strict
1175   // 'signature' configuration of input and output dependencies (see
1176   // the predicates below for exact details). The card mark may be as
1177   // simple as a few extra nodes or, in a few GC configurations, may
1178   // include more complex control flow between the leading and
1179   // trailing memory barriers. However, whatever the card mark
1180   // configuration these signatures are unique to translated volatile
1181   // reads/stores -- they will not appear as a result of any other
1182   // bytecode translation or inlining nor as a consequence of
1183   // optimizing transforms.
1184   //
1185   // We also want to catch inlined unsafe volatile gets and puts and
1186   // be able to implement them using either ldar<x>/stlr<x> or some
1187   // combination of ldr<x>/stlr<x> and dmb instructions.
1188   //
1189   // Inlined unsafe volatiles puts manifest as a minor variant of the
1190   // normal volatile put node sequence containing an extra cpuorder
1191   // membar
1192   //
1193   //   MemBarRelease
1194   //   MemBarCPUOrder
1195   //   StoreX[mo_release] {CardMark}-optional
1196   //   MemBarCPUOrder
1197   //   MemBarVolatile
1198   //
1199   // n.b. as an aside, a cpuorder membar is not itself subject to
1200   // matching and translation by adlc rules.  However, the rule
1201   // predicates need to detect its presence in order to correctly
1202   // select the desired adlc rules.
1203   //
1204   // Inlined unsafe volatile gets manifest as a slightly different
1205   // node sequence to a normal volatile get because of the
1206   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1209   // present
1210   //
1211   //   MemBarCPUOrder
1212   //        ||       \\
1213   //   MemBarCPUOrder LoadX[mo_acquire]
1214   //        ||            |
1215   //        ||       {DecodeN} optional
1216   //        ||       /
1217   //     MemBarAcquire
1218   //
1219   // In this case the acquire membar does not directly depend on the
1220   // load. However, we can be sure that the load is generated from an
1221   // inlined unsafe volatile get if we see it dependent on this unique
1222   // sequence of membar nodes. Similarly, given an acquire membar we
1223   // can know that it was added because of an inlined unsafe volatile
1224   // get if it is fed and feeds a cpuorder membar and if its feed
1225   // membar also feeds an acquiring load.
1226   //
1227   // Finally an inlined (Unsafe) CAS operation is translated to the
1228   // following ideal graph
1229   //
1230   //   MemBarRelease
1231   //   MemBarCPUOrder
1232   //   CompareAndSwapX {CardMark}-optional
1233   //   MemBarCPUOrder
1234   //   MemBarAcquire
1235   //
1236   // So, where we can identify these volatile read and write
1237   // signatures we can choose to plant either of the above two code
1238   // sequences. For a volatile read we can simply plant a normal
1239   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1240   // also choose to inhibit translation of the MemBarAcquire and
1241   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1242   //
1243   // When we recognise a volatile store signature we can choose to
1244   // plant at a dmb ish as a translation for the MemBarRelease, a
1245   // normal str<x> and then a dmb ish for the MemBarVolatile.
1246   // Alternatively, we can inhibit translation of the MemBarRelease
1247   // and MemBarVolatile and instead plant a simple stlr<x>
1248   // instruction.
1249   //
1250   // when we recognise a CAS signature we can choose to plant a dmb
1251   // ish as a translation for the MemBarRelease, the conventional
1252   // macro-instruction sequence for the CompareAndSwap node (which
1253   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1254   // Alternatively, we can elide generation of the dmb instructions
1255   // and plant the alternative CompareAndSwap macro-instruction
1256   // sequence (which uses ldaxr<x>).
1257   //
1258   // Of course, the above only applies when we see these signature
1259   // configurations. We still want to plant dmb instructions in any
1260   // other cases where we may see a MemBarAcquire, MemBarRelease or
1261   // MemBarVolatile. For example, at the end of a constructor which
1262   // writes final/volatile fields we will see a MemBarRelease
1263   // instruction and this needs a 'dmb ish' lest we risk the
1264   // constructed object being visible without making the
1265   // final/volatile field writes visible.
1266   //
1267   // n.b. the translation rules below which rely on detection of the
1268   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1269   // If we see anything other than the signature configurations we
1270   // always just translate the loads and stores to ldr<x> and str<x>
1271   // and translate acquire, release and volatile membars to the
1272   // relevant dmb instructions.
1273   //
1274 
1275   // graph traversal helpers used for volatile put/get and CAS
1276   // optimization
1277 
1278   // 1) general purpose helpers
1279 
1280   // if node n is linked to a parent MemBarNode by an intervening
1281   // Control and Memory ProjNode return the MemBarNode otherwise return
1282   // NULL.
1283   //
1284   // n may only be a Load or a MemBar.
1285 
1286   MemBarNode *parent_membar(const Node *n)
1287   {
1288     Node *ctl = NULL;
1289     Node *mem = NULL;
1290     Node *membar = NULL;
1291 
1292     if (n->is_Load()) {
1293       ctl = n->lookup(LoadNode::Control);
1294       mem = n->lookup(LoadNode::Memory);
1295     } else if (n->is_MemBar()) {
1296       ctl = n->lookup(TypeFunc::Control);
1297       mem = n->lookup(TypeFunc::Memory);
1298     } else {
1299         return NULL;
1300     }
1301 
1302     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1303       return NULL;
1304     }
1305 
1306     membar = ctl->lookup(0);
1307 
1308     if (!membar || !membar->is_MemBar()) {
1309       return NULL;
1310     }
1311 
1312     if (mem->lookup(0) != membar) {
1313       return NULL;
1314     }
1315 
1316     return membar->as_MemBar();
1317   }
1318 
1319   // if n is linked to a child MemBarNode by intervening Control and
1320   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1321 
1322   MemBarNode *child_membar(const MemBarNode *n)
1323   {
1324     ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control);
1325     ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory);
1326 
1327     // MemBar needs to have both a Ctl and Mem projection
1328     if (! ctl || ! mem)
1329       return NULL;
1330 
1331     MemBarNode *child = NULL;
1332     Node *x;
1333 
1334     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1335       x = ctl->fast_out(i);
1336       // if we see a membar we keep hold of it. we may also see a new
1337       // arena copy of the original but it will appear later
1338       if (x->is_MemBar()) {
1339           child = x->as_MemBar();
1340           break;
1341       }
1342     }
1343 
1344     if (child == NULL) {
1345       return NULL;
1346     }
1347 
1348     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1349       x = mem->fast_out(i);
1350       // if we see a membar we keep hold of it. we may also see a new
1351       // arena copy of the original but it will appear later
1352       if (x == child) {
1353         return child;
1354       }
1355     }
1356     return NULL;
1357   }
1358 
1359   // helper predicate use to filter candidates for a leading memory
1360   // barrier
1361   //
1362   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1363   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1364 
1365   bool leading_membar(const MemBarNode *barrier)
1366   {
1367     int opcode = barrier->Opcode();
1368     // if this is a release membar we are ok
1369     if (opcode == Op_MemBarRelease) {
1370       return true;
1371     }
1372     // if its a cpuorder membar . . .
1373     if (opcode != Op_MemBarCPUOrder) {
1374       return false;
1375     }
1376     // then the parent has to be a release membar
1377     MemBarNode *parent = parent_membar(barrier);
1378     if (!parent) {
1379       return false;
1380     }
1381     opcode = parent->Opcode();
1382     return opcode == Op_MemBarRelease;
1383   }
1384 
1385   // 2) card mark detection helper
1386 
1387   // helper predicate which can be used to detect a volatile membar
1388   // introduced as part of a conditional card mark sequence either by
1389   // G1 or by CMS when UseCondCardMark is true.
1390   //
1391   // membar can be definitively determined to be part of a card mark
1392   // sequence if and only if all the following hold
1393   //
1394   // i) it is a MemBarVolatile
1395   //
1396   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1397   // true
1398   //
1399   // iii) the node's Mem projection feeds a StoreCM node.
1400 
1401   bool is_card_mark_membar(const MemBarNode *barrier)
1402   {
1403     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1404       return false;
1405     }
1406 
1407     if (barrier->Opcode() != Op_MemBarVolatile) {
1408       return false;
1409     }
1410 
1411     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1412 
1413     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1414       Node *y = mem->fast_out(i);
1415       if (y->Opcode() == Op_StoreCM) {
1416         return true;
1417       }
1418     }
1419 
1420     return false;
1421   }
1422 
1423 
1424   // 3) helper predicates to traverse volatile put or CAS graphs which
1425   // may contain GC barrier subgraphs
1426 
1427   // Preamble
1428   // --------
1429   //
1430   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1432   // leading MemBarRelease and a trailing MemBarVolatile as follows
1433   //
1434   //   MemBarRelease
1435   //  {      ||      } -- optional
1436   //  {MemBarCPUOrder}
1437   //         ||     \\
1438   //         ||     StoreX[mo_release]
1439   //         | \     /
1440   //         | MergeMem
1441   //         | /
1442   //  {MemBarCPUOrder} -- optional
1443   //  {      ||      }
1444   //   MemBarVolatile
1445   //
1446   // where
1447   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1448   //  | \ and / indicate further routing of the Ctl and Mem feeds
1449   //
1450   // this is the graph we see for non-object stores. however, for a
1451   // volatile Object store (StoreN/P) we may see other nodes below the
1452   // leading membar because of the need for a GC pre- or post-write
1453   // barrier.
1454   //
  // with most GC configurations we will see this simple variant which
1456   // includes a post-write barrier card mark.
1457   //
1458   //   MemBarRelease______________________________
1459   //         ||    \\               Ctl \        \\
1460   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1461   //         | \     /                       . . .  /
1462   //         | MergeMem
1463   //         | /
1464   //         ||      /
1465   //  {MemBarCPUOrder} -- optional
1466   //  {      ||      }
1467   //   MemBarVolatile
1468   //
1469   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1470   // the object address to an int used to compute the card offset) and
1471   // Ctl+Mem to a StoreB node (which does the actual card mark).
1472   //
1473   // n.b. a StoreCM node will only appear in this configuration when
1474   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1475   // because it implies a requirement to order visibility of the card
1476   // mark (StoreCM) relative to the object put (StoreP/N) using a
1477   // StoreStore memory barrier (arguably this ought to be represented
1478   // explicitly in the ideal graph but that is not how it works). This
1479   // ordering is required for both non-volatile and volatile
1480   // puts. Normally that means we need to translate a StoreCM using
1481   // the sequence
1482   //
1483   //   dmb ishst
1484   //   stlrb
1485   //
1486   // However, in the case of a volatile put if we can recognise this
1487   // configuration and plant an stlr for the object write then we can
1488   // omit the dmb and just plant an strb since visibility of the stlr
1489   // is ordered before visibility of subsequent stores. StoreCM nodes
1490   // also arise when using G1 or using CMS with conditional card
1491   // marking. In these cases (as we shall see) we don't need to insert
1492   // the dmb when translating StoreCM because there is already an
1493   // intervening StoreLoad barrier between it and the StoreP/N.
1494   //
1495   // It is also possible to perform the card mark conditionally on it
1496   // currently being unmarked in which case the volatile put graph
1497   // will look slightly different
1498   //
1499   //   MemBarRelease____________________________________________
1500   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1501   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1502   //         | \     /                              \            |
1503   //         | MergeMem                            . . .      StoreB
1504   //         | /                                                /
1505   //         ||     /
1506   //   MemBarVolatile
1507   //
1508   // It is worth noting at this stage that both the above
1509   // configurations can be uniquely identified by checking that the
1510   // memory flow includes the following subgraph:
1511   //
1512   //   MemBarRelease
1513   //  {MemBarCPUOrder}
1514   //          |  \      . . .
1515   //          |  StoreX[mo_release]  . . .
1516   //          |   /
1517   //         MergeMem
1518   //          |
1519   //  {MemBarCPUOrder}
1520   //   MemBarVolatile
1521   //
1522   // This is referred to as a *normal* subgraph. It can easily be
1523   // detected starting from any candidate MemBarRelease,
1524   // StoreX[mo_release] or MemBarVolatile.
1525   //
1526   // A simple variation on this normal case occurs for an unsafe CAS
1527   // operation. The basic graph for a non-object CAS is
1528   //
1529   //   MemBarRelease
1530   //         ||
1531   //   MemBarCPUOrder
1532   //         ||     \\   . . .
1533   //         ||     CompareAndSwapX
1534   //         ||       |
1535   //         ||     SCMemProj
1536   //         | \     /
1537   //         | MergeMem
1538   //         | /
1539   //   MemBarCPUOrder
1540   //         ||
1541   //   MemBarAcquire
1542   //
1543   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1546   // tail of the graph is a pair comprising a MemBarCPUOrder +
1547   // MemBarAcquire.
1548   //
1549   // So, in the case of a CAS the normal graph has the variant form
1550   //
1551   //   MemBarRelease
1552   //   MemBarCPUOrder
1553   //          |   \      . . .
1554   //          |  CompareAndSwapX  . . .
1555   //          |    |
1556   //          |   SCMemProj
1557   //          |   /  . . .
1558   //         MergeMem
1559   //          |
1560   //   MemBarCPUOrder
1561   //   MemBarAcquire
1562   //
1563   // This graph can also easily be detected starting from any
1564   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1565   //
1566   // the code below uses two helper predicates, leading_to_normal and
1567   // normal_to_leading to identify these normal graphs, one validating
1568   // the layout starting from the top membar and searching down and
1569   // the other validating the layout starting from the lower membar
1570   // and searching up.
1571   //
1572   // There are two special case GC configurations when a normal graph
1573   // may not be generated: when using G1 (which always employs a
1574   // conditional card mark); and when using CMS with conditional card
1575   // marking configured. These GCs are both concurrent rather than
1576   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1577   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1579   // object put and the corresponding conditional card mark. CMS
1580   // employs a post-write GC barrier while G1 employs both a pre- and
1581   // post-write GC barrier. Of course the extra nodes may be absent --
1582   // they are only inserted for object puts/swaps. This significantly
1583   // complicates the task of identifying whether a MemBarRelease,
1584   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1585   // when using these GC configurations (see below). It adds similar
1586   // complexity to the task of identifying whether a MemBarRelease,
1587   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1588   //
1589   // In both cases the post-write subtree includes an auxiliary
1590   // MemBarVolatile (StoreLoad barrier) separating the object put/swap
1591   // and the read of the corresponding card. This poses two additional
1592   // problems.
1593   //
1594   // Firstly, a card mark MemBarVolatile needs to be distinguished
1595   // from a normal trailing MemBarVolatile. Resolving this first
1596   // problem is straightforward: a card mark MemBarVolatile always
1597   // projects a Mem feed to a StoreCM node and that is a unique marker
1598   //
1599   //      MemBarVolatile (card mark)
1600   //       C |    \     . . .
1601   //         |   StoreCM   . . .
1602   //       . . .
1603   //
1604   // The second problem is how the code generator is to translate the
1605   // card mark barrier? It always needs to be translated to a "dmb
1606   // ish" instruction whether or not it occurs as part of a volatile
1607   // put. A StoreLoad barrier is needed after the object put to ensure
1608   // i) visibility to GC threads of the object put and ii) visibility
1609   // to the mutator thread of any card clearing write by a GC
1610   // thread. Clearly a normal store (str) will not guarantee this
1611   // ordering but neither will a releasing store (stlr). The latter
1612   // guarantees that the object put is visible but does not guarantee
1613   // that writes by other threads have also been observed.
1614   //
1615   // So, returning to the task of translating the object put and the
1616   // leading/trailing membar nodes: what do the non-normal node graph
1617   // look like for these 2 special cases? and how can we determine the
1618   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1619   // in both normal and non-normal cases?
1620   //
1621   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1623   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1624   // intervening StoreLoad barrier (MemBarVolatile).
1625   //
1626   // So, with CMS we may see a node graph for a volatile object store
1627   // which looks like this
1628   //
1629   //   MemBarRelease
1630   //  {MemBarCPUOrder}_(leading)_________________
1631   //     C |    M \       \\                   C \
1632   //       |       \    StoreN/P[mo_release]  CastP2X
1633   //       |    Bot \    /
1634   //       |       MergeMem
1635   //       |         /
1636   //      MemBarVolatile (card mark)
1637   //     C |  ||    M |
1638   //       | LoadB    |
1639   //       |   |      |
1640   //       | Cmp      |\
1641   //       | /        | \
1642   //       If         |  \
1643   //       | \        |   \
1644   // IfFalse  IfTrue  |    \
1645   //       \     / \  |     \
1646   //        \   / StoreCM    |
1647   //         \ /      |      |
1648   //        Region   . . .   |
1649   //          | \           /
1650   //          |  . . .  \  / Bot
1651   //          |       MergeMem
1652   //          |          |
1653   //       {MemBarCPUOrder}
1654   //        MemBarVolatile (trailing)
1655   //
1656   // The first MergeMem merges the AliasIdxBot Mem slice from the
1657   // leading membar and the oopptr Mem slice from the Store into the
1658   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1659   // Mem slice from the card mark membar and the AliasIdxRaw slice
1660   // from the StoreCM into the trailing membar (n.b. the latter
1661   // proceeds via a Phi associated with the If region).
1662   //
1663   // The graph for a CAS varies slightly, the difference being
1664   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1665   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1666   // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional).
1667   //
1668   //   MemBarRelease
1669   //   MemBarCPUOrder_(leading)_______________
1670   //     C |    M \       \\                C \
1671   //       |       \    CompareAndSwapN/P  CastP2X
1672   //       |        \      |
1673   //       |         \   SCMemProj
1674   //       |      Bot \   /
1675   //       |        MergeMem
1676   //       |         /
1677   //      MemBarVolatile (card mark)
1678   //     C |  ||    M |
1679   //       | LoadB    |
1680   //       |   |      |
1681   //       | Cmp      |\
1682   //       | /        | \
1683   //       If         |  \
1684   //       | \        |   \
1685   // IfFalse  IfTrue  |    \
1686   //       \     / \  |     \
1687   //        \   / StoreCM    |
1688   //         \ /      |      |
1689   //        Region   . . .   |
1690   //          | \           /
1691   //          |  . . .  \  / Bot
1692   //          |       MergeMem
1693   //          |          |
1694   //        MemBarCPUOrder
1695   //        MemBarVolatile (trailing)
1696   //
1697   //
1698   // G1 is quite a lot more complicated. The nodes inserted on behalf
1699   // of G1 may comprise: a pre-write graph which adds the old value to
1700   // the SATB queue; the releasing store itself; and, finally, a
1701   // post-write graph which performs a card mark.
1702   //
1703   // The pre-write graph may be omitted, but only when the put is
1704   // writing to a newly allocated (young gen) object and then only if
1705   // there is a direct memory chain to the Initialize node for the
1706   // object allocation. This will not happen for a volatile put since
1707   // any memory chain passes through the leading membar.
1708   //
1709   // The pre-write graph includes a series of 3 If tests. The outermost
1710   // If tests whether SATB is enabled (no else case). The next If tests
1711   // whether the old value is non-NULL (no else case). The third tests
1712   // whether the SATB queue index is > 0, if so updating the queue. The
1713   // else case for this third If calls out to the runtime to allocate a
1714   // new queue buffer.
1715   //
1716   // So with G1 the pre-write and releasing store subgraph looks like
1717   // this (the nested Ifs are omitted).
1718   //
1719   //  MemBarRelease
1720   // {MemBarCPUOrder}_(leading)___________
1721   //     C |  ||  M \   M \    M \  M \ . . .
1722   //       | LoadB   \  LoadL  LoadN   \
1723   //       | /        \                 \
1724   //       If         |\                 \
1725   //       | \        | \                 \
1726   //  IfFalse  IfTrue |  \                 \
1727   //       |     |    |   \                 |
1728   //       |     If   |   /\                |
1729   //       |     |          \               |
1730   //       |                 \              |
1731   //       |    . . .         \             |
1732   //       | /       | /       |            |
1733   //      Region  Phi[M]       |            |
1734   //       | \       |         |            |
1735   //       |  \_____ | ___     |            |
1736   //     C | C \     |   C \ M |            |
1737   //       | CastP2X | StoreN/P[mo_release] |
1738   //       |         |         |            |
1739   //     C |       M |       M |          M |
1740   //        \        |         |           /
1741   //                  . . .
1742   //          (post write subtree elided)
1743   //                    . . .
1744   //             C \         M /
1745   //                \         /
1746   //             {MemBarCPUOrder}
1747   //              MemBarVolatile (trailing)
1748   //
1749   // n.b. the LoadB in this subgraph is not the card read -- it's a
1750   // read of the SATB queue active flag.
1751   //
1752   // The G1 post-write subtree is also optional, this time when the
1753   // new value being written is either null or can be identified as a
1754   // newly allocated (young gen) object with no intervening control
1755   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1758   // trailing membar as per the normal subgraph. So, the only special
1759   // case which arises is when the post-write subgraph is generated.
1760   //
1761   // The kernel of the post-write G1 subgraph is the card mark itself
1762   // which includes a card mark memory barrier (MemBarVolatile), a
1763   // card test (LoadB), and a conditional update (If feeding a
1764   // StoreCM). These nodes are surrounded by a series of nested Ifs
1765   // which try to avoid doing the card mark. The top level If skips if
1766   // the object reference does not cross regions (i.e. it tests if
1767   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1768   // need not be recorded. The next If, which skips on a NULL value,
1769   // may be absent (it is not generated if the type of value is >=
1770   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1771   // checking if card_val != young).  n.b. although this test requires
1772   // a pre-read of the card it can safely be done before the StoreLoad
1773   // barrier. However that does not bypass the need to reread the card
1774   // after the barrier. A final, 4th If tests if the card is already
1775   // marked.
1776   //
1777   //                (pre-write subtree elided)
1778   //        . . .                  . . .    . . .  . . .
1779   //        C |                    M |     M |    M |
1780   //       Region                  Phi[M] StoreN    |
1781   //          |                     / \      |      |
1782   //         / \_______            /   \     |      |
1783   //      C / C \      . . .            \    |      |
1784   //       If   CastP2X . . .            |   |      |
1785   //       / \                           |   |      |
1786   //      /   \                          |   |      |
1787   // IfFalse IfTrue                      |   |      |
1788   //   |       |                         |   |     /|
1789   //   |       If                        |   |    / |
1790   //   |      / \                        |   |   /  |
1791   //   |     /   \                        \  |  /   |
1792   //   | IfFalse IfTrue                   MergeMem  |
1793   //   |  . . .    / \                       /      |
1794   //   |          /   \                     /       |
1795   //   |     IfFalse IfTrue                /        |
1796   //   |      . . .    |                  /         |
1797   //   |               If                /          |
1798   //   |               / \              /           |
1799   //   |              /   \            /            |
1800   //   |         IfFalse IfTrue       /             |
1801   //   |           . . .   |         /              |
1802   //   |                    \       /               |
1803   //   |                     \     /                |
1804   //   |             MemBarVolatile__(card mark)    |
1805   //   |                ||   C |  M \  M \          |
1806   //   |               LoadB   If    |    |         |
1807   //   |                      / \    |    |         |
1808   //   |                     . . .   |    |         |
1809   //   |                          \  |    |        /
1810   //   |                        StoreCM   |       /
1811   //   |                          . . .   |      /
1812   //   |                        _________/      /
1813   //   |                       /  _____________/
1814   //   |   . . .       . . .  |  /            /
1815   //   |    |                 | /   _________/
1816   //   |    |               Phi[M] /        /
1817   //   |    |                 |   /        /
1818   //   |    |                 |  /        /
1819   //   |  Region  . . .     Phi[M]  _____/
1820   //   |    /                 |    /
1821   //   |                      |   /
1822   //   | . . .   . . .        |  /
1823   //   | /                    | /
1824   // Region           |  |  Phi[M]
1825   //   |              |  |  / Bot
1826   //    \            MergeMem
1827   //     \            /
1828   //    {MemBarCPUOrder}
1829   //     MemBarVolatile
1830   //
1831   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1832   // from the leading membar and the oopptr Mem slice from the Store
1833   // into the card mark membar i.e. the memory flow to the card mark
1834   // membar still looks like a normal graph.
1835   //
1836   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1837   // Mem slices (from the StoreCM and other card mark queue stores).
1838   // However in this case the AliasIdxBot Mem slice does not come
1839   // direct from the card mark membar. It is merged through a series
1840   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1841   // from the leading membar with the Mem feed from the card mark
1842   // membar. Each Phi corresponds to one of the Ifs which may skip
1843   // around the card mark membar. So when the If implementing the NULL
1844   // value check has been elided the total number of Phis is 2
1845   // otherwise it is 3.
1846   //
1847   // The CAS graph when using G1GC also includes a pre-write subgraph
1848   // and an optional post-write subgraph. The same variations are
1849   // introduced as for CMS with conditional card marking i.e. the
1850   // StoreP/N is swapped for a CompareAndSwapP/N with a following
1851   // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder +
1852   // MemBarAcquire pair. There may be an extra If test introduced in
1853   // the CAS case, when the boolean result of the CAS is tested by the
1854   // caller. In that case an extra Region and AliasIdxBot Phi may be
1855   // introduced before the MergeMem
1856   //
1857   // So, the upshot is that in all cases the subgraph will include a
  // *normal* memory subgraph between the leading membar and its child
1859   // membar: either a normal volatile put graph including a releasing
1860   // StoreX and terminating with a trailing volatile membar or card
1861   // mark volatile membar; or a normal CAS graph including a
1862   // CompareAndSwapX + SCMemProj pair and terminating with a card mark
1863   // volatile membar or a trailing cpu order and acquire membar
1864   // pair. If the child membar is not a (volatile) card mark membar
1865   // then it marks the end of the volatile put or CAS subgraph. If the
1866   // child is a card mark membar then the normal subgraph will form
1867   // part of a larger volatile put or CAS subgraph if and only if the
1868   // child feeds an AliasIdxBot Mem feed to a trailing barrier via a
1869   // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4
1870   // Phi nodes merging the leading barrier memory flow (for G1).
1871   //
1872   // The predicates controlling generation of instructions for store
1873   // and barrier nodes employ a few simple helper functions (described
1874   // below) which identify the presence or absence of all these
1875   // subgraph configurations and provide a means of traversing from
1876   // one node in the subgraph to another.
1877 
1878   // is_CAS(int opcode)
1879   //
1880   // return true if opcode is one of the possible CompareAndSwapX
1881   // values otherwise false.
1882 
1883   bool is_CAS(int opcode)
1884   {
1885     switch(opcode) {
1886       // We handle these
1887     case Op_CompareAndSwapI:
1888     case Op_CompareAndSwapL:
1889     case Op_CompareAndSwapP:
1890     case Op_CompareAndSwapN:
1891  // case Op_CompareAndSwapB:
1892  // case Op_CompareAndSwapS:
1893       return true;
1894       // These are TBD
1895     case Op_WeakCompareAndSwapB:
1896     case Op_WeakCompareAndSwapS:
1897     case Op_WeakCompareAndSwapI:
1898     case Op_WeakCompareAndSwapL:
1899     case Op_WeakCompareAndSwapP:
1900     case Op_WeakCompareAndSwapN:
1901     case Op_CompareAndExchangeB:
1902     case Op_CompareAndExchangeS:
1903     case Op_CompareAndExchangeI:
1904     case Op_CompareAndExchangeL:
1905     case Op_CompareAndExchangeP:
1906     case Op_CompareAndExchangeN:
1907       return false;
1908     default:
1909       return false;
1910     }
1911   }
1912 
1913   // helper to determine the maximum number of Phi nodes we may need to
1914   // traverse when searching from a card mark membar for the merge mem
1915   // feeding a trailing membar or vice versa
1916 
1917   int max_phis()
1918   {
1919     if (UseG1GC) {
1920       return 4;
1921     } else if (UseConcMarkSweepGC && UseCondCardMark) {
1922       return 1;
1923     } else {
1924       return 0;
1925     }
1926   }
1927 
1928   // leading_to_normal
1929   //
1930   // graph traversal helper which detects the normal case Mem feed
1931   // from a release membar (or, optionally, its cpuorder child) to a
1932   // dependent volatile or acquire membar i.e. it ensures that one of
1933   // the following 3 Mem flow subgraphs is present.
1934   //
1935   //   MemBarRelease
1936   //  {MemBarCPUOrder} {leading}
1937   //          |  \      . . .
1938   //          |  StoreN/P[mo_release]  . . .
1939   //          |   /
1940   //         MergeMem
1941   //          |
1942   //  {MemBarCPUOrder}
1943   //   MemBarVolatile {trailing or card mark}
1944   //
1945   //   MemBarRelease
1946   //   MemBarCPUOrder {leading}
1947   //          |  \      . . .
1948   //          |  CompareAndSwapX  . . .
1949   //          |   /
1950   //         MergeMem
1951   //          |
1952   //   MemBarVolatile {card mark}
1953   //
1954   //   MemBarRelease
1955   //   MemBarCPUOrder {leading}
1956   //          |  \      . . .
1957   //          |  CompareAndSwapX  . . .
1958   //          |   /
1959   //         MergeMem
1960   //          |
1961   //   MemBarCPUOrder
1962   //   MemBarAcquire {trailing}
1963   //
1964   // if the correct configuration is present returns the trailing
1965   // or cardmark membar otherwise NULL.
1966   //
1967   // the input membar is expected to be either a cpuorder membar or a
1968   // release membar. in the latter case it should not have a cpu membar
1969   // child.
1970   //
1971   // the returned value may be a card mark or trailing membar
1972   //
1973 
  // Walk the normal Mem flow down from a leading membar: its Memory
  // projection must feed exactly one releasing StoreX or one
  // CompareAndSwapX plus exactly one MergeMem, and that MergeMem must
  // feed a trailing/card mark membar. Returns that membar or NULL.
  MemBarNode *leading_to_normal(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;

    // scan the uses of the memory projection looking for a unique
    // MergeMem and a unique releasing store or CAS (a StoreCM does
    // not count -- card marks are tracked separately)
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          return NULL;
        }
        // two merge mems is one too many
        mm = x->as_MergeMem();
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have a merge
    if (!mm) {
      return NULL;
    }

    // identify the node which should feed the merge: for a CAS the
    // Mem flow passes through an intermediary SCMemProj, for a store
    // it is the store itself
    Node *feed = NULL;
    if (cas) {
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->Opcode() == Op_SCMemProj) {
          feed = x;
          break;
        }
      }
      if (feed == NULL) {
        return NULL;
      }
    } else {
      feed = st;
    }
    // ensure the feed node feeds the existing mergemem;
    for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
      x = feed->fast_out(i);
      if (x == mm) {
        break;
      }
    }
    // if the loop fell through without finding mm among feed's uses
    // then x is not mm and this is not the graph we want
    if (x != mm) {
      return NULL;
    }

    MemBarNode *mbar = NULL;
    // ensure the merge feeds to the expected type of membar
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        if (x->Opcode() == Op_MemBarCPUOrder) {
          // with a store any cpu order membar should precede a
          // trailing volatile membar. with a cas it should precede a
          // trailing acquire membar. in either case try to skip to
          // that next membar
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL) {
            // skip to this new membar to do the check
            x = y;
          }

        }
        if (x->Opcode() == Op_MemBarVolatile) {
          mbar = x->as_MemBar();
          // for a volatile store this can be either a trailing membar
          // or a card mark membar. for a cas it must be a card mark
          // membar
          guarantee(cas == NULL || is_card_mark_membar(mbar),
                    "in CAS graph volatile membar must be a card mark");
        } else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) {
          mbar = x->as_MemBar();
        }
        // only the first membar use of the merge is considered
        break;
      }
    }

    return mbar;
  }
2085 
2086   // normal_to_leading
2087   //
2088   // graph traversal helper which detects the normal case Mem feed
2089   // from either a card mark or a trailing membar to a preceding
2090   // release membar (optionally its cpuorder child) i.e. it ensures
2091   // that one of the following 3 Mem flow subgraphs is present.
2092   //
2093   //   MemBarRelease
2094   //  {MemBarCPUOrder} {leading}
2095   //          |  \      . . .
2096   //          |  StoreN/P[mo_release]  . . .
2097   //          |   /
2098   //         MergeMem
2099   //          |
2100   //  {MemBarCPUOrder}
2101   //   MemBarVolatile {trailing or card mark}
2102   //
2103   //   MemBarRelease
2104   //   MemBarCPUOrder {leading}
2105   //          |  \      . . .
2106   //          |  CompareAndSwapX  . . .
2107   //          |   /
2108   //         MergeMem
2109   //          |
2110   //   MemBarVolatile {card mark}
2111   //
2112   //   MemBarRelease
2113   //   MemBarCPUOrder {leading}
2114   //          |  \      . . .
2115   //          |  CompareAndSwapX  . . .
2116   //          |   /
2117   //         MergeMem
2118   //          |
2119   //   MemBarCPUOrder
2120   //   MemBarAcquire {trailing}
2121   //
2122   // this predicate checks for the same flow as the previous predicate
2123   // but starting from the bottom rather than the top.
2124   //
  // if the configuration is present returns the cpuorder membar for
2126   // preference or when absent the release membar otherwise NULL.
2127   //
2128   // n.b. the input membar is expected to be a MemBarVolatile but
2129   // need not be a card mark membar.
2130 
  // Walk the normal Mem flow up from a trailing/card mark membar: via
  // an optional cpu order parent, through a MergeMem whose Bot slice
  // comes from a leading membar, then sanity-check that the leading
  // membar feeds a releasing store or CAS back into that same merge.
  // Returns the leading membar or NULL.
  MemBarNode *normal_to_leading(const MemBarNode *barrier)
  {
    // input must be a volatile membar
    assert((barrier->Opcode() == Op_MemBarVolatile ||
            barrier->Opcode() == Op_MemBarAcquire),
           "expecting a volatile or an acquire membar");
    bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire;

    // if we have an intervening cpu order membar then start the
    // search from it

    Node *x = parent_membar(barrier);

    if (x == NULL) {
      // stick with the original barrier
      x = (Node *)barrier;
    } else if (x->Opcode() != Op_MemBarCPUOrder) {
      // any other barrier means this is not the graph we want
      return NULL;
    }

    // the Mem feed to the membar should be a merge
    x = x ->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // the merge should get its Bottom mem feed from the leading membar
    x = mm->in(Compile::AliasIdxBot);

    // ensure this is a non control projection
    if (!x->is_Proj() || x->is_CFG()) {
      return NULL;
    }
    // if it is fed by a membar that's the one we want
    x = x->in(0);

    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *leading = x->as_MemBar();
    // reject invalid candidates
    if (!leading_membar(leading)) {
      return NULL;
    }

    // ok, we have a leading membar, now for the sanity clauses

    // the leading membar must feed Mem to a releasing store or CAS
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
    StoreNode *st = NULL;
    LoadStoreNode *cas = NULL;
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two stores or CASes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // n.b. the loop above guarantees we cannot have both a store and
    // a cas -- at most one of st/cas is non-NULL here
    if (st == NULL && cas == NULL) {
      // we have neither -- this is not a normal graph
      return NULL;
    }
    if (st == NULL) {
      // if we started from a volatile membar and found a CAS then the
      // original membar ought to be for a card mark
      guarantee((barrier_is_acquire || is_card_mark_membar(barrier)),
                "unexpected volatile barrier (i.e. not card mark) in CAS graph");
      // check that the CAS feeds the merge we used to get here via an
      // intermediary SCMemProj
      Node *scmemproj = NULL;
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->Opcode() == Op_SCMemProj) {
          scmemproj = x;
          break;
        }
      }
      if (scmemproj == NULL) {
        return NULL;
      }
      for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) {
        x = scmemproj->fast_out(i);
        if (x == mm) {
          return leading;
        }
      }
    } else {
      // we should not have found a store if we started from an acquire
      guarantee(!barrier_is_acquire,
                "unexpected trailing acquire barrier in volatile store graph");

      // the store should feed the merge we used to get here
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          return leading;
        }
      }
    }

    // the store/CAS did not feed back into the merge -- wrong graph
    return NULL;
  }
2245 
2246   // card_mark_to_trailing
2247   //
2248   // graph traversal helper which detects extra, non-normal Mem feed
2249   // from a card mark volatile membar to a trailing membar i.e. it
2250   // ensures that one of the following three GC post-write Mem flow
2251   // subgraphs is present.
2252   //
2253   // 1)
2254   //     . . .
2255   //       |
2256   //   MemBarVolatile (card mark)
2257   //      |          |
2258   //      |        StoreCM
2259   //      |          |
2260   //      |        . . .
2261   //  Bot |  /
2262   //   MergeMem
2263   //      |
2264   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2265   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2266   //                                 
2267   //
2268   // 2)
2269   //   MemBarRelease/CPUOrder (leading)
2270   //    |
2271   //    |
2272   //    |\       . . .
2273   //    | \        |
2274   //    |  \  MemBarVolatile (card mark)
2275   //    |   \   |     |
2276   //     \   \  |   StoreCM    . . .
2277   //      \   \ |
2278   //       \  Phi
2279   //        \ /
2280   //        Phi  . . .
2281   //     Bot |   /
2282   //       MergeMem
2283   //         |
2284   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2285   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2286   //
2287   // 3)
2288   //   MemBarRelease/CPUOrder (leading)
2289   //    |
2290   //    |\
2291   //    | \
2292   //    |  \      . . .
2293   //    |   \       |
2294   //    |\   \  MemBarVolatile (card mark)
2295   //    | \   \   |     |
2296   //    |  \   \  |   StoreCM    . . .
2297   //    |   \   \ |
2298   //     \   \  Phi
2299   //      \   \ /
2300   //       \  Phi
2301   //        \ /
2302   //        Phi  . . .
2303   //     Bot |   /
2304   //       MergeMem
2305   //         |
2306   //         |
2307   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2308   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2309   //
2310   // 4)
2311   //   MemBarRelease/CPUOrder (leading)
2312   //    |
2313   //    |\
2314   //    | \
2315   //    |  \
2316   //    |   \
2317   //    |\   \
2318   //    | \   \
2319   //    |  \   \        . . .
2320   //    |   \   \         |
2321   //    |\   \   \   MemBarVolatile (card mark)
2322   //    | \   \   \   /   |
2323   //    |  \   \   \ /  StoreCM    . . .
2324   //    |   \   \  Phi
2325   //     \   \   \ /
2326   //      \   \  Phi
2327   //       \   \ /
2328   //        \  Phi
2329   //         \ /
2330   //         Phi  . . .
2331   //      Bot |   /
2332   //       MergeMem
2333   //          |
2334   //          |
2335   //    MemBarCPUOrder
2336   //    MemBarAcquire {trailing}
2337   //
2338   // configuration 1 is only valid if UseConcMarkSweepGC &&
2339   // UseCondCardMark
2340   //
2341   // configuration 2, is only valid if UseConcMarkSweepGC &&
2342   // UseCondCardMark or if UseG1GC
2343   //
2344   // configurations 3 and 4 are only valid if UseG1GC.
2345   //
2346   // if a valid configuration is present returns the trailing membar
2347   // otherwise NULL.
2348   //
2349   // n.b. the supplied membar is expected to be a card mark
2350   // MemBarVolatile i.e. the caller must ensure the input node has the
2351   // correct operand and feeds Mem to a StoreCM node
2352 
  // Follow the extra GC Mem flow down from a card mark membar to the
  // trailing membar: the membar's Memory projection must reach a
  // MergeMem either directly or via up to max_phis() Bot-slice Phi
  // nodes, and the merge must feed a trailing volatile or acquire
  // membar (possibly behind a cpu order membar). Returns that
  // trailing membar or NULL.
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
  {
    // input must be a card mark volatile membar
    assert(is_card_mark_membar(barrier), "expecting a card mark membar");

    Node *feed = barrier->proj_out(TypeFunc::Memory);
    Node *x;
    MergeMemNode *mm = NULL;

    const int MAX_PHIS = max_phis(); // max phis we will search through
    int phicount = 0;                // current search count

    bool retry_feed = true;
    while (retry_feed) {
      // see if we have a direct MergeMem feed
      for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
        x = feed->fast_out(i);
        // pick the first MergeMem use of the current feed node
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm) {
        retry_feed = false;
      } else if (phicount++ < MAX_PHIS) {
        // the barrier may feed indirectly via one or two Phi nodes
        PhiNode *phi = NULL;
        for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
          x = feed->fast_out(i);
          // the correct Phi will be merging a Bot memory slice
          if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
            phi = x->as_Phi();
            break;
          }
        }
        if (!phi) {
          return NULL;
        }
        // look for another merge below this phi
        feed = phi;
      } else {
        // couldn't find a merge
        return NULL;
      }
    }

    // sanity check this feed turns up as the expected slice
    guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");

    MemBarNode *trailing = NULL;
    // be sure we have a trailing membar fed by the merge
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        // if this is an intervening cpu order membar skip to the
        // following membar
        if (x->Opcode() == Op_MemBarCPUOrder) {
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL) {
            x = y;
          }
        }
        if (x->Opcode() == Op_MemBarVolatile ||
            x->Opcode() == Op_MemBarAcquire) {
          trailing = x->as_MemBar();
        }
        // only the first membar use of the merge is considered
        break;
      }
    }

    return trailing;
  }
2427 
2428   // trailing_to_card_mark
2429   //
2430   // graph traversal helper which detects extra, non-normal Mem feed
2431   // from a trailing volatile membar to a preceding card mark volatile
2432   // membar i.e. it identifies whether one of the three possible extra
2433   // GC post-write Mem flow subgraphs is present
2434   //
2435   // this predicate checks for the same flow as the previous predicate
2436   // but starting from the bottom rather than the top.
2437   //
2438   // if the configuration is present returns the card mark membar
2439   // otherwise NULL
2440   //
2441   // n.b. the supplied membar is expected to be a trailing
2442   // MemBarVolatile or MemBarAcquire i.e. the caller must ensure the
2443   // input node has the correct opcode
2444 
2445   MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
2446   {
2447     assert(trailing->Opcode() == Op_MemBarVolatile ||
2448            trailing->Opcode() == Op_MemBarAcquire,
2449            "expecting a volatile or acquire membar");
2450     assert(!is_card_mark_membar(trailing),
2451            "not expecting a card mark membar");
2452 
2453     Node *x = (Node *)trailing;
2454 
2455     // look for a preceding cpu order membar
2456     MemBarNode *y = parent_membar(x->as_MemBar());
2457     if (y != NULL) {
2458       // make sure it is a cpu order membar
2459       if (y->Opcode() != Op_MemBarCPUOrder) {
2460         // this is nto the graph we were looking for
2461         return NULL;
2462       }
2463       // start the search from here
2464       x = y;
2465     }
2466 
2467     // the Mem feed to the membar should be a merge
2468     x = x->in(TypeFunc::Memory);
2469     if (!x->is_MergeMem()) {
2470       return NULL;
2471     }
2472 
2473     MergeMemNode *mm = x->as_MergeMem();
2474 
2475     x = mm->in(Compile::AliasIdxBot);
2476     // with G1 we may possibly see a Phi or two before we see a Memory
2477     // Proj from the card mark membar
2478 
2479     const int MAX_PHIS = max_phis(); // max phis we will search through
2480     int phicount = 0;                    // current search count
2481 
2482     bool retry_feed = !x->is_Proj();
2483 
2484     while (retry_feed) {
2485       if (x->is_Phi() && phicount++ < MAX_PHIS) {
2486         PhiNode *phi = x->as_Phi();
2487         ProjNode *proj = NULL;
2488         PhiNode *nextphi = NULL;
2489         bool found_leading = false;
2490         for (uint i = 1; i < phi->req(); i++) {
2491           x = phi->in(i);
2492           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2493             nextphi = x->as_Phi();
2494           } else if (x->is_Proj()) {
2495             int opcode = x->in(0)->Opcode();
2496             if (opcode == Op_MemBarVolatile) {
2497               proj = x->as_Proj();
2498             } else if (opcode == Op_MemBarRelease ||
2499                        opcode == Op_MemBarCPUOrder) {
2500               // probably a leading membar
2501               found_leading = true;
2502             }
2503           }
2504         }
2505         // if we found a correct looking proj then retry from there
    // otherwise we must see a leading and a phi or this is the
2507         // wrong config
2508         if (proj != NULL) {
2509           x = proj;
2510           retry_feed = false;
2511         } else if (found_leading && nextphi != NULL) {
2512           // retry from this phi to check phi2
2513           x = nextphi;
2514         } else {
2515           // not what we were looking for
2516           return NULL;
2517         }
2518       } else {
2519         return NULL;
2520       }
2521     }
2522     // the proj has to come from the card mark membar
2523     x = x->in(0);
2524     if (!x->is_MemBar()) {
2525       return NULL;
2526     }
2527 
2528     MemBarNode *card_mark_membar = x->as_MemBar();
2529 
2530     if (!is_card_mark_membar(card_mark_membar)) {
2531       return NULL;
2532     }
2533 
2534     return card_mark_membar;
2535   }
2536 
2537   // trailing_to_leading
2538   //
2539   // graph traversal helper which checks the Mem flow up the graph
2540   // from a (non-card mark) trailing membar attempting to locate and
2541   // return an associated leading membar. it first looks for a
2542   // subgraph in the normal configuration (relying on helper
2543   // normal_to_leading). failing that it then looks for one of the
2544   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2546   // trailing_to_card_mark), and then checks that the card mark membar
2547   // is fed by a leading membar (once again relying on auxiliary
2548   // predicate normal_to_leading).
2549   //
  // if the configuration is valid returns the cpuorder membar for
2551   // preference or when absent the release membar otherwise NULL.
2552   //
2553   // n.b. the input membar is expected to be either a volatile or
2554   // acquire membar but in the former case must *not* be a card mark
2555   // membar.
2556 
2557   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2558   {
2559     assert((trailing->Opcode() == Op_MemBarAcquire ||
2560             trailing->Opcode() == Op_MemBarVolatile),
2561            "expecting an acquire or volatile membar");
2562     assert((trailing->Opcode() != Op_MemBarVolatile ||
2563             !is_card_mark_membar(trailing)),
2564            "not expecting a card mark membar");
2565 
2566     MemBarNode *leading = normal_to_leading(trailing);
2567 
2568     if (leading) {
2569       return leading;
2570     }
2571 
2572     // there is no normal path from trailing to leading membar. see if
2573     // we can arrive via a card mark membar
2574 
2575     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2576 
2577     if (!card_mark_membar) {
2578       return NULL;
2579     }
2580 
2581     return normal_to_leading(card_mark_membar);
2582   }
2583 
2584   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2585 
2586 bool unnecessary_acquire(const Node *barrier)
2587 {
2588   assert(barrier->is_MemBar(), "expecting a membar");
2589 
2590   if (UseBarriersForVolatile) {
2591     // we need to plant a dmb
2592     return false;
2593   }
2594 
2595   // a volatile read derived from bytecode (or also from an inlined
2596   // SHA field read via LibraryCallKit::load_field_from_object)
2597   // manifests as a LoadX[mo_acquire] followed by an acquire membar
2598   // with a bogus read dependency on it's preceding load. so in those
2599   // cases we will find the load node at the PARMS offset of the
2600   // acquire membar.  n.b. there may be an intervening DecodeN node.
2601 
2602   Node *x = barrier->lookup(TypeFunc::Parms);
2603   if (x) {
2604     // we are starting from an acquire and it has a fake dependency
2605     //
2606     // need to check for
2607     //
2608     //   LoadX[mo_acquire]
2609     //   {  |1   }
2610     //   {DecodeN}
2611     //      |Parms
2612     //   MemBarAcquire*
2613     //
2614     // where * tags node we were passed
2615     // and |k means input k
2616     if (x->is_DecodeNarrowPtr()) {
2617       x = x->in(1);
2618     }
2619 
2620     return (x->is_Load() && x->as_Load()->is_acquire());
2621   }
2622 
2623   // other option for unnecessary membar is that it is a trailing node
2624   // belonging to a CAS
2625 
2626   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2627 
2628   return leading != NULL;
2629 }
2630 
// returns true when the acquiring load n feeds a MemBarAcquire
// (possibly through a DecodeN) and so should be implemented using an
// acquiring ldar<x> rather than a plain load plus dmb
bool needs_acquiring_load(const Node *n)
{
  assert(n->is_Load(), "expecting a load");
  if (UseBarriersForVolatile) {
    // we use a normal load and a dmb
    return false;
  }

  LoadNode *ld = n->as_Load();

  // only loads marked as acquiring are candidates
  if (!ld->is_acquire()) {
    return false;
  }

  // check if this load is feeding an acquire membar
  //
  //   LoadX[mo_acquire]
  //   {  |1   }
  //   {DecodeN}
  //      |Parms
  //   MemBarAcquire*
  //
  // where * tags node we were passed
  // and |k means input k

  Node *start = ld;
  Node *mbacq = NULL;

  // if we hit a DecodeNarrowPtr we reset the start node and restart
  // the search through the outputs
 restart:

  for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
    Node *x = start->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
      // found the acquire membar consuming the load
      mbacq = x;
    } else if (!mbacq &&
               (x->is_DecodeNarrowPtr() ||
                (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
      // follow the decode instead; the !mbacq guard stops us
      // restarting once an acquire membar has already been found
      start = x;
      goto restart;
    }
  }

  if (mbacq) {
    return true;
  }

  return false;
}
2681 
2682 bool unnecessary_release(const Node *n)
2683 {
2684   assert((n->is_MemBar() &&
2685           n->Opcode() == Op_MemBarRelease),
2686          "expecting a release membar");
2687 
2688   if (UseBarriersForVolatile) {
2689     // we need to plant a dmb
2690     return false;
2691   }
2692 
2693   // if there is a dependent CPUOrder barrier then use that as the
2694   // leading
2695 
2696   MemBarNode *barrier = n->as_MemBar();
2697   // check for an intervening cpuorder membar
2698   MemBarNode *b = child_membar(barrier);
2699   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2700     // ok, so start the check from the dependent cpuorder barrier
2701     barrier = b;
2702   }
2703 
2704   // must start with a normal feed
2705   MemBarNode *child_barrier = leading_to_normal(barrier);
2706 
2707   if (!child_barrier) {
2708     return false;
2709   }
2710 
2711   if (!is_card_mark_membar(child_barrier)) {
2712     // this is the trailing membar and we are done
2713     return true;
2714   }
2715 
2716   // must be sure this card mark feeds a trailing membar
2717   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2718   return (trailing != NULL);
2719 }
2720 
2721 bool unnecessary_volatile(const Node *n)
2722 {
2723   // assert n->is_MemBar();
2724   if (UseBarriersForVolatile) {
2725     // we need to plant a dmb
2726     return false;
2727   }
2728 
2729   MemBarNode *mbvol = n->as_MemBar();
2730 
2731   // first we check if this is part of a card mark. if so then we have
2732   // to generate a StoreLoad barrier
2733 
2734   if (is_card_mark_membar(mbvol)) {
2735       return false;
2736   }
2737 
2738   // ok, if it's not a card mark then we still need to check if it is
2739   // a trailing membar of a volatile put graph.
2740 
2741   return (trailing_to_leading(mbvol) != NULL);
2742 }
2743 
2744 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2745 
2746 bool needs_releasing_store(const Node *n)
2747 {
2748   // assert n->is_Store();
2749   if (UseBarriersForVolatile) {
2750     // we use a normal store and dmb combination
2751     return false;
2752   }
2753 
2754   StoreNode *st = n->as_Store();
2755 
2756   // the store must be marked as releasing
2757   if (!st->is_release()) {
2758     return false;
2759   }
2760 
2761   // the store must be fed by a membar
2762 
2763   Node *x = st->lookup(StoreNode::Memory);
2764 
2765   if (! x || !x->is_Proj()) {
2766     return false;
2767   }
2768 
2769   ProjNode *proj = x->as_Proj();
2770 
2771   x = proj->lookup(0);
2772 
2773   if (!x || !x->is_MemBar()) {
2774     return false;
2775   }
2776 
2777   MemBarNode *barrier = x->as_MemBar();
2778 
2779   // if the barrier is a release membar or a cpuorder mmebar fed by a
2780   // release membar then we need to check whether that forms part of a
2781   // volatile put graph.
2782 
2783   // reject invalid candidates
2784   if (!leading_membar(barrier)) {
2785     return false;
2786   }
2787 
2788   // does this lead a normal subgraph?
2789   MemBarNode *mbvol = leading_to_normal(barrier);
2790 
2791   if (!mbvol) {
2792     return false;
2793   }
2794 
2795   // all done unless this is a card mark
2796   if (!is_card_mark_membar(mbvol)) {
2797     return true;
2798   }
2799 
2800   // we found a card mark -- just make sure we have a trailing barrier
2801 
2802   return (card_mark_to_trailing(mbvol) != NULL);
2803 }
2804 
2805 // predicate controlling translation of CAS
2806 //
2807 // returns true if CAS needs to use an acquiring load otherwise false
2808 
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit barriers are planted instead of using ldaxr
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // in debug builds verify that the CAS really is embedded in the
  // expected leading membar + normal subgraph configuration
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  guarantee(barrier->Opcode() == Op_MemBarCPUOrder,
            "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  guarantee(mbar != NULL, "CAS not embedded in normal graph!");

  // if this is a card mark membar check we have a trailing acquire

  if (is_card_mark_membar(mbar)) {
    mbar = card_mark_to_trailing(mbar);
  }

  guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!");

  guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2861 
2862 // predicate controlling translation of StoreCM
2863 //
2864 // returns true if a StoreStore must precede the card write otherwise
2865 // false
2866 
2867 bool unnecessary_storestore(const Node *storecm)
2868 {
2869   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2870 
2871   // we only ever need to generate a dmb ishst between an object put
2872   // and the associated card mark when we are using CMS without
2873   // conditional card marking
2874 
2875   if (!UseConcMarkSweepGC || UseCondCardMark) {
2876     return true;
2877   }
2878 
2879   // if we are implementing volatile puts using barriers then the
2880   // object put is an str so we must insert the dmb ishst
2881 
2882   if (UseBarriersForVolatile) {
2883     return false;
2884   }
2885 
2886   // we can omit the dmb ishst if this StoreCM is part of a volatile
2887   // put because in thta case the put will be implemented by stlr
2888   //
2889   // we need to check for a normal subgraph feeding this StoreCM.
2890   // that means the StoreCM must be fed Memory from a leading membar,
2891   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2892   // leading membar must be part of a normal subgraph
2893 
2894   Node *x = storecm->in(StoreNode::Memory);
2895 
2896   if (!x->is_Proj()) {
2897     return false;
2898   }
2899 
2900   x = x->in(0);
2901 
2902   if (!x->is_MemBar()) {
2903     return false;
2904   }
2905 
2906   MemBarNode *leading = x->as_MemBar();
2907 
2908   // reject invalid candidates
2909   if (!leading_membar(leading)) {
2910     return false;
2911   }
2912 
2913   // we can omit the StoreStore if it is the head of a normal subgraph
2914   return (leading_to_normal(leading) != NULL);
2915 }
2916 
2917 
2918 #define __ _masm.
2919 
2920 // advance declarations for helper functions to convert register
2921 // indices to register objects
2922 
2923 // the ad file has to provide implementations of certain methods
2924 // expected by the generic code
2925 //
2926 // REQUIRED FUNCTIONALITY
2927 
2928 //=============================================================================
2929 
2930 // !!!!! Special hack to get all types of calls to specify the byte offset
2931 //       from the start of the call to the point where the return address
2932 //       will point.
2933 
2934 int MachCallStaticJavaNode::ret_addr_offset()
2935 {
2936   // call should be a simple bl
2937   int off = 4;
2938   return off;
2939 }
2940 
2941 int MachCallDynamicJavaNode::ret_addr_offset()
2942 {
2943   return 16; // movz, movk, movk, bl
2944 }
2945 
2946 int MachCallRuntimeNode::ret_addr_offset() {
2947   // for generated stubs the call will be
2948   //   far_call(addr)
2949   // for real runtime callouts it will be six instructions
2950   // see aarch64_enc_java_to_runtime
2951   //   adr(rscratch2, retaddr)
2952   //   lea(rscratch1, RuntimeAddress(addr)
2953   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2954   //   blrt rscratch1
2955   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2956   if (cb) {
2957     return MacroAssembler::far_branch_size();
2958   } else {
2959     return 6 * NativeInstruction::instruction_size;
2960   }
2961 }
2962 
2963 // Indicate if the safepoint node needs the polling page as an input
2964 
2965 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2967 // instruction itself. so we cannot plant a mov of the safepoint poll
2968 // address followed by a load. setting this to true means the mov is
2969 // scheduled as a prior instruction. that's better for scheduling
2970 // anyway.
2971 
2972 bool SafePointNode::needs_polling_address_input()
2973 {
2974   return true;
2975 }
2976 
2977 //=============================================================================
2978 
2979 #ifndef PRODUCT
// format a breakpoint node for debug output
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2983 #endif
2984 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // plant a brk instruction with immediate 0
  __ brk(0);
}
2989 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // compute the size generically from the emitted code
  return MachNode::size(ra_);
}
2993 
2994 //=============================================================================
2995 
2996 #ifndef PRODUCT
  // format a nop pad for debug output; _count is the number of nops
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
3000 #endif
3001 
3002   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
3003     MacroAssembler _masm(&cbuf);
3004     for (int i = 0; i < _count; i++) {
3005       __ nop();
3006     }
3007   }
3008 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // each nop occupies one instruction slot
    return _count * NativeInstruction::instruction_size;
  }
3012 
3013 //=============================================================================
// the constant table base node defines no output register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
3015 
// the constant table is addressed absolutely, so the base offset is
// always zero
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
3019 
// no post-register-allocation expansion is needed for the base node
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// never called because requires_postalloc_expand() returns false
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
3024 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding -- no instructions are generated for this node
}
3028 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // the encoding is empty, so the size is zero
  return 0;
}
3032 
3033 #ifndef PRODUCT
// format the (empty) constant base node for debug output
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
3037 #endif
3038 
3039 #ifndef PRODUCT
// format the method prolog for debug output; intended to mirror the
// frame-build sequence generated by MachPrologNode::emit
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // a small frame is dropped with one sub and fp/lr stored at the
  // top; a large frame stores fp/lr first, then drops sp via
  // rscratch1 because the offset will not fit an immediate
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
3059 #endif
3060 
// emit the method prolog: an invalidation nop, an optional stack
// bang, the frame build, and any simulator/debug bookkeeping
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the frame becomes complete for the oop map machinery
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3096 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
3102 
int MachPrologNode::reloc() const
{
  // the prolog contains no relocatable values
  return 0;
}
3107 
3108 //=============================================================================
3109 
3110 #ifndef PRODUCT
// format the method epilog for debug output; intended to mirror the
// frame teardown and poll performed by MachEpilogNode::emit
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // three shapes mirroring the prolog: empty frame, small frame
  // (immediate offsets fit), and large frame (offset via rscratch1)
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
3134 #endif
3135 
// emit the method epilog: frame teardown, optional simulator notify,
// reserved stack check, and the return poll
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // touch the polling page so a safepoint can interrupt the return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3155 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
3160 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
3165 
const Pipeline * MachEpilogNode::pipeline() const {
  // use the generic pipeline class
  return MachNode::pipeline_class();
}
3169 
3170 // This method seems to be obsolete. It is declared in machnode.hpp
3171 // and defined in all *.ad files, but it is never called. Should we
3172 // get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  // presumably the offset of the poll within the epilog -- but see
  // the note above: this method appears to be dead code
  return 4;
}
3177 
3178 //=============================================================================
3179 
3180 // Figure out which register class each belongs in: rc_int, rc_float or
3181 // rc_stack.
3182 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3183 
3184 static enum RC rc_class(OptoReg::Name reg) {
3185 
3186   if (reg == OptoReg::Bad) {
3187     return rc_bad;
3188   }
3189 
3190   // we have 30 int registers * 2 halves
3191   // (rscratch1 and rscratch2 are omitted)
3192 
3193   if (reg < 60) {
3194     return rc_int;
3195   }
3196 
3197   // we have 32 float register * 2 halves
3198   if (reg < 60 + 128) {
3199     return rc_float;
3200   }
3201 
3202   // Between float regs & stack is the flags regs.
3203   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3204 
3205   return rc_stack;
3206 }
3207 
3208 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3209   Compile* C = ra_->C;
3210 
3211   // Get registers to move.
3212   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3213   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3214   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3215   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3216 
3217   enum RC src_hi_rc = rc_class(src_hi);
3218   enum RC src_lo_rc = rc_class(src_lo);
3219   enum RC dst_hi_rc = rc_class(dst_hi);
3220   enum RC dst_lo_rc = rc_class(dst_lo);
3221 
3222   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3223 
3224   if (src_hi != OptoReg::Bad) {
3225     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3226            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3227            "expected aligned-adjacent pairs");
3228   }
3229 
3230   if (src_lo == dst_lo && src_hi == dst_hi) {
3231     return 0;            // Self copy, no move.
3232   }
3233 
3234   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3235               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3236   int src_offset = ra_->reg2offset(src_lo);
3237   int dst_offset = ra_->reg2offset(dst_lo);
3238 
3239   if (bottom_type()->isa_vect() != NULL) {
3240     uint ireg = ideal_reg();
3241     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3242     if (cbuf) {
3243       MacroAssembler _masm(cbuf);
3244       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3245       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3246         // stack->stack
3247         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3248         if (ireg == Op_VecD) {
3249           __ unspill(rscratch1, true, src_offset);
3250           __ spill(rscratch1, true, dst_offset);
3251         } else {
3252           __ spill_copy128(src_offset, dst_offset);
3253         }
3254       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3255         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3256                ireg == Op_VecD ? __ T8B : __ T16B,
3257                as_FloatRegister(Matcher::_regEncode[src_lo]));
3258       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3259         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3260                        ireg == Op_VecD ? __ D : __ Q,
3261                        ra_->reg2offset(dst_lo));
3262       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3263         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3264                        ireg == Op_VecD ? __ D : __ Q,
3265                        ra_->reg2offset(src_lo));
3266       } else {
3267         ShouldNotReachHere();
3268       }
3269     }
3270   } else if (cbuf) {
3271     MacroAssembler _masm(cbuf);
3272     switch (src_lo_rc) {
3273     case rc_int:
3274       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3275         if (is64) {
3276             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3277                    as_Register(Matcher::_regEncode[src_lo]));
3278         } else {
3279             MacroAssembler _masm(cbuf);
3280             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3281                     as_Register(Matcher::_regEncode[src_lo]));
3282         }
3283       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3284         if (is64) {
3285             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3286                      as_Register(Matcher::_regEncode[src_lo]));
3287         } else {
3288             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3289                      as_Register(Matcher::_regEncode[src_lo]));
3290         }
3291       } else {                    // gpr --> stack spill
3292         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3293         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3294       }
3295       break;
3296     case rc_float:
3297       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3298         if (is64) {
3299             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3300                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3301         } else {
3302             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3303                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3304         }
3305       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3306           if (cbuf) {
3307             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3308                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3309         } else {
3310             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3311                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3312         }
3313       } else {                    // fpr --> stack spill
3314         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3315         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3316                  is64 ? __ D : __ S, dst_offset);
3317       }
3318       break;
3319     case rc_stack:
3320       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3321         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3322       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3323         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3324                    is64 ? __ D : __ S, src_offset);
3325       } else {                    // stack --> stack copy
3326         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3327         __ unspill(rscratch1, is64, src_offset);
3328         __ spill(rscratch1, is64, dst_offset);
3329       }
3330       break;
3331     default:
3332       assert(false, "bad rc_class for spill");
3333       ShouldNotReachHere();
3334     }
3335   }
3336 
3337   if (st) {
3338     st->print("spill ");
3339     if (src_lo_rc == rc_stack) {
3340       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3341     } else {
3342       st->print("%s -> ", Matcher::regName[src_lo]);
3343     }
3344     if (dst_lo_rc == rc_stack) {
3345       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3346     } else {
3347       st->print("%s", Matcher::regName[dst_lo]);
3348     }
3349     if (bottom_type()->isa_vect() != NULL) {
3350       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3351     } else {
3352       st->print("\t# spill size = %d", is64 ? 64:32);
3353     }
3354   }
3355 
3356   return 0;
3357 
3358 }
3359 
3360 #ifndef PRODUCT
3361 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3362   if (!ra_)
3363     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3364   else
3365     implementation(NULL, ra_, false, st);
3366 }
3367 #endif
3368 
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // delegate to the shared worker in emit-only mode (no stream)
  implementation(&cbuf, ra_, false, NULL);
}
3372 
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable size; compute it generically from the emitted code
  return MachNode::size(ra_);
}
3376 
3377 //=============================================================================
3378 
3379 #ifndef PRODUCT
3380 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3381   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3382   int reg = ra_->get_reg_first(this);
3383   st->print("add %s, rsp, #%d]\t# box lock",
3384             Matcher::regName[reg], offset);
3385 }
3386 #endif
3387 
3388 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3389   MacroAssembler _masm(&cbuf);
3390 
3391   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3392   int reg    = ra_->get_encode(this);
3393 
3394   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3395     __ add(as_Register(reg), sp, offset);
3396   } else {
3397     ShouldNotReachHere();
3398   }
3399 }
3400 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // the emit is a single 4 byte add instruction
  return 4;
}
3405 
3406 //=============================================================================
3407 
3408 #ifndef PRODUCT
3409 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3410 {
3411   st->print_cr("# MachUEPNode");
3412   if (UseCompressedClassPointers) {
3413     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3414     if (Universe::narrow_klass_shift() != 0) {
3415       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3416     }
3417   } else {
3418    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3419   }
3420   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3421   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3422 }
3423 #endif
3424 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // compare the receiver klass (j_rarg0) against the expected inline
  // cache klass and jump to the miss stub when they differ
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3438 
// Size of the UEP is computed from the emitted code; it varies with
// compressed class pointers (see cmp_klass in emit() above).
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3443 
3444 // REQUIRED EMIT CODE
3445 
3446 //=============================================================================
3447 
// Emit exception handler code.
//
// Emits a small stub that far-jumps to the shared exception blob.
// Returns the offset of the handler within the stub section, or 0 if
// the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  // size_exception_handler() promised enough room for the far jump.
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3467 
// Emit deopt handler code.
//
// Emits a stub that loads its own address into lr and far-jumps to the
// deopt blob's unpack entry.  Returns the offset of the handler within
// the stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr <- pc of this instruction; presumably the unpack blob uses lr to
  // identify the deopt site -- see SharedRuntime::deopt_blob (confirm).
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3488 
3489 // REQUIRED MATCHER CODE
3490 
3491 //=============================================================================
3492 
3493 const bool Matcher::match_rule_supported(int opcode) {
3494 
3495   switch (opcode) {
3496   default:
3497     break;
3498   }
3499 
3500   if (!has_match_rule(opcode)) {
3501     return false;
3502   }
3503 
3504   return true;  // Per default match rules are supported.
3505 }
3506 
3507 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3508 
3509   // TODO
3510   // identify extra cases that we might want to provide match rules for
3511   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3512   bool ret_value = match_rule_supported(opcode);
3513   // Add rules here.
3514 
3515   return ret_value;  // Per default match rules are supported.
3516 }
3517 
// No predicated (masked) vector operations are available here.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// No platform-specific adjustment of the FP register-pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Only meaningful for stack-based FPUs; must never be called on this port.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3531 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
//
// The +/-32KB window corresponds to the most restrictive short
// conditional-branch forms; it is independent of rule and branch size.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.

  return (-32768 <= offset && offset < 32768);
}
3541 
// Is a 64-bit constant cheap enough to emit inline?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3552 
// Vector width in bytes.
// At most 16 bytes (one Q register), capped by MaxVectorSize; returns
// 0 when a vector could not hold at least two elements of type bt,
// i.e. vectorization is not worthwhile for that type.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  int size = MIN2(16,(int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  // Width in bytes divided by the element size (0 when vectors are off).
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3567 const int Matcher::min_vector_size(const BasicType bt) {
3568 //  For the moment limit the vector size to 8 bytes
3569     int size = 8 / type2aelembytes(bt);
3570     if (size < 2) size = 2;
3571     return size;
3572 }
3573 
3574 // Vector ideal reg.
3575 const uint Matcher::vector_ideal_reg(int len) {
3576   switch(len) {
3577     case  8: return Op_VecD;
3578     case 16: return Op_VecX;
3579   }
3580   ShouldNotReachHere();
3581   return 0;
3582 }
3583 
// Vector shift counts are always held in a full 128-bit register,
// regardless of the size of the shifted operands.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// AArch64 supports misaligned vector store/load (unless AlignVector
// has been requested via the flag).
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3597 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Conditional moves on floating-point values are equally cheap.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3611 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (false: the hardware uses only the low bits of the count.)
const bool Matcher::need_masked_shift_count = false;
3618 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only fold the decode into the address when no shift is needed.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3638 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  // (Only when the heap base is NULL, i.e. zero-based compressed oops.)
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // (Only when the klass base is NULL, i.e. zero-based encoding.)
  return Universe::narrow_klass_base() == NULL;
}
3648 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Never called on this port (the "No-op" behaviour is an amd64-ism):
// implicit null checks need no operand fixup here.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3666 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
// No: floats are stored in their natural 32-bit form.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3680 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // r0-r7 and v0-v7 are the Java argument registers; both halves of
  // each register pair are listed.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
3706 
bool Matcher::is_spillable_arg(int reg)
{
  // Any register that can carry a Java argument can also be spilled.
  return can_be_java_arg(reg);
}

// Use the compiler's generic division-by-constant strength reduction
// rather than a hand-written assembler sequence.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3715 
// Register for DIVI projection of divmodI.
// None of the div/mod projection masks below should ever be queried,
// presumably because fused div/mod nodes are not matched on this
// platform -- hence ShouldNotReachHere in each.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  // The SP saved around a method-handle invoke lives in FP.
  return FP_REG_mask();
}
3742 
3743 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3744   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3745     Node* u = addp->fast_out(i);
3746     if (u->is_Mem()) {
3747       int opsize = u->as_Mem()->memory_size();
3748       assert(opsize > 0, "unexpected memory operand size");
3749       if (u->as_Mem()->memory_size() != (1<<shift)) {
3750         return false;
3751       }
3752     }
3753   }
3754   return true;
3755 }
3756 
const bool Matcher::convi2l_type_required = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Beyond simple base+offset (handled by clone_base_plus_offset_address),
// two shapes are folded into the memory operand:
//   (AddP base (LShiftL off con))  -- provided every memory user matches
//                                     the scale implied by 'con'
//   (AddP base (ConvI2L idx))      -- sign-extended int index
// Matched sub-expressions are flagged in address_visited so they are
// consumed as address operands rather than computed into registers.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3799 
// No ideal-graph address reshaping is needed on this platform; complex
// addressing is handled during matching (clone_address_expressions).
void Compile::reshape_address(AddPNode* addp) {
}
3802 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

// Computes gpcnt/fpcnt argument counts and an rtype return-type code
// from the call's TypeFunc signature.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): T_FLOAT/T_DOUBLE fall through into default, so FP
      // arguments are counted in gps as well.  Presumably intentional for
      // the simulator blrt ABI, but confirm -- a missing 'break' would
      // look identical.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // Any non-void, non-FP return is treated as integral.
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3844 
// Emits a volatile (acquire/release) memory access.  Declares the
// _masm that any trailing '__' statements in the same enc_class use.
// Only a plain base-register address is legal for these instruction
// forms, hence the guarantees on INDEX/DISP/SCALE.
// NOTE(review): the SCRATCH parameter is currently unused.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore() helpers below:
// scalar integer, scalar FP, and SIMD/vector forms of a load or store.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3858 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // These patterns carry a 32-bit index that must be sign-extended.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Base + displacement addressing (no index register).
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Base + (possibly extended/scaled) index register.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3889 
  // As above, for scalar floating-point accesses.
  // NOTE(review): unlike the integer variant, the unscaled INDINDEXI2L /
  // INDINDEXI2LN cases are not listed here -- presumably FP accesses never
  // match those patterns; confirm against the memory operand definitions.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3912 
  // As above, for SIMD/vector accesses; the index (if any) is always
  // zero-extended and scaled by lsl.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3924 
3925 %}
3926 
3927 
3928 
3929 //----------ENCODING BLOCK-----------------------------------------------------
3930 // This block specifies the encoding classes used by the compiler to
3931 // output byte streams.  Encoding classes are parameterized macros
3932 // used by Machine Instruction Nodes in order to generate the bit
3933 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
3936 // COND_INTER.  REG_INTER causes an operand to generate a function
3937 // which returns its register number when queried.  CONST_INTER causes
3938 // an operand to generate a function which returns the value of the
3939 // constant when queried.  MEMORY_INTER causes an operand to generate
3940 // four functions which return the Base Register, the Index Register,
3941 // the Scale Value, and the Offset Value of the operand when queried.
3942 // COND_INTER causes an operand to generate six functions which return
3943 // the encoding code (ie - encoding bits for the instruction)
3944 // associated with each basic boolean condition for a conditional
3945 // instruction.
3946 //
3947 // Instructions specify two basic values for encoding.  Again, a
3948 // function is available to check if the constant displacement is an
3949 // oop. They use the ins_encode keyword to specify their encoding
3950 // classes (which must be a sequence of enc_class names, and their
3951 // parameters, specified in the encoding block), and they use the
3952 // opcode keyword to specify, in order, their primary, secondary, and
3953 // tertiary opcode.  Only the opcode sections which a particular
3954 // instruction needs for encoding need to be specified.
3955 encode %{
3956   // Build emit functions for each basic byte or larger field in the
3957   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3958   // from C++ code in the enc_class source block.  Emit functions will
3959   // live in the main source block for now.  In future, we can
3960   // generalize this by adding a syntax that specifies the sizes of
3961   // fields in an order, so that the adlc can build the emit functions
3962   // automagically
3963 
  // catch all for unimplemented encodings
  // Emits a trap that reports "C2 catch all" if ever executed.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3969 
  // BEGIN Non-volatile memory access

  // Each load encoding below forwards to the matching loadStore()
  // helper above, which derives the addressing mode (base+disp or
  // base+extended index) from the memory operand and its opcode.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4073 
  // Store encodings; the *0 variants store the zero register (zr) and
  // so need no source operand.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4166 
4167   // END Non-volatile memory access
4168 
4169   // volatile loads and stores
4170 
4171   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4172     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4173                  rscratch1, stlrb);
4174   %}
4175 
4176   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4177     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4178                  rscratch1, stlrh);
4179   %}
4180 
4181   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4182     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4183                  rscratch1, stlrw);
4184   %}
4185 
4186 
  // Acquire-load encodings.  AArch64 has no sign-extending acquire loads, so
  // the signed variants emit the zero-extending ldarb/ldarh followed by an
  // explicit sign-extend (sxtb/sxth) of the destination.  The `__ sxt*` calls
  // after MOV_VOLATILE rely on the macro introducing a `_masm` in scope --
  // presumably it does; confirm against the macro definition (not visible here).

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (int destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended (long destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (int destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended (long destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire 32-bit word (int destination).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): same enc_class name as the iRegI variant directly above,
  // differing only in the declared operand class (iRegL).  ADLC evidently
  // tolerates the duplicate -- confirm which definition instructions actually
  // bind to before relying on either.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
4249 
  // Acquire-load of a float/double: there is no FP load-acquire instruction,
  // so load into the integer scratch register with ldarw/ldar and then move
  // the bits across to the FP destination with fmov.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4261 
  // Release-store of a 64-bit value.  Special case: when asked to store the
  // stack pointer (which stlr cannot encode), copy sp into rscratch2 first
  // and store that instead; this only happens for stores into the current
  // thread, which the assert checks.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4275 
  // Release-store of a float/double: no FP store-release exists, so move the
  // FP bits into rscratch2 with fmov (inside a local scope so this _masm does
  // not clash with the one MOV_VOLATILE presumably declares) and then emit
  // stlrw/stlr from the integer register.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4295 
4296   // synchronized read/update encodings
4297 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only accepts a plain
  // base-register address, so any index/displacement is first folded into
  // rscratch1 with lea (two leas when both index and disp are present).
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4326 
  // Store-release-exclusive of a 64-bit value.  The address is folded into
  // rscratch2 (rscratch1 receives the stlxr status result: 0 on success).
  // The final cmpw leaves EQ set iff the exclusive store succeeded, which is
  // the condition downstream instruct rules branch on.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4356 
  // Compare-and-exchange encodings for xword/word/halfword/byte.  All share
  // the same shape: the matcher guarantees a plain base-register address
  // (index == -1, disp == 0), and MacroAssembler::cmpxchg does the CAS with
  // release semantics only (acquire == false) and strong (non-weak) retry.
  // Passing noreg for the result register means callers read success from
  // the condition flags (see aarch64_enc_cset_eq below).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4388 
4389 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.  (Here /*acquire*/ is true; the non-acq variants above pass false.)
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4409 
4410 
  // auxiliary used for CompareAndSwapX to set result register
  // Materializes the flags left by a preceding cmpxchg encoding:
  // res = 1 if EQ (CAS succeeded), else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
4417 
4418   // prefetch encodings
4419 
  // Prefetch-for-write encoding: emits prfm PSTL1KEEP for the given memory
  // operand.  prfm's addressing cannot take base+index+disp all at once, so
  // when both are present the displacement is folded into rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4438 
  // mov encodings
4440 
  // Load a 32-bit immediate into dst.  Zero is special-cased to a move from
  // the zero register; otherwise MacroAssembler::movw picks the movz/movn/
  // movk sequence -- presumably; confirm against MacroAssembler (not visible
  // here).
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate into dst; same zero special case as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
4462 
  // Load a pointer constant into dst, dispatching on its relocation type:
  // oops via movoop, metadata via mov_metadata, and plain addresses either
  // as a small immediate (below the first page) or as adrp+add.  NULL and
  // the sentinel value 1 are handled by dedicated encodings (mov_p0/mov_p1
  // below), so reaching them here is a bug.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Addresses inside the first VM page fit a plain mov; anything
        // larger is materialized page-relative with adrp plus the low bits.
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4487 
  // Null pointer constant: just zero the register.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a sentinel value by callers of this
  // encoding -- see the immP_1 operand definition elsewhere in this file).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Load the safepoint polling page address with a poll_type-relocated adrp.
  // The page is page-aligned, hence the assert that adrp's residual offset
  // is zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base (GC write-barrier support).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Load a narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow-oop null constant: zero the register.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load a narrow (compressed) klass constant; must carry a metadata
  // relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4545 
4546   // arithmetic encodings
4547 
  // Add/subtract-immediate encodings shared by the add and sub instruct
  // rules: $primary distinguishes them (0 = add, 1 = subtract), so subtract
  // is implemented by negating the constant and the sign of the (possibly
  // negated) constant then selects add vs sub so the encoded immediate is
  // always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above.  NOTE(review): the constant is truncated
  // through int32_t -- presumably immLAddSub guarantees it fits; confirm
  // against the operand definition (not visible here).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // Java-semantics 32-bit divide (corrected_idivl handles MIN_VALUE / -1).
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // Java-semantics 64-bit divide.  NOTE(review): operands are declared
  // iRegI but corrected_idivq is the 64-bit divide -- the operand class
  // appears not to matter to the emitted code; confirm.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // Java-semantics 32-bit remainder (want_remainder == true).
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // Java-semantics 64-bit remainder (see iRegI note on aarch64_enc_div).
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4607 
4608   // compare instruction encodings
4609 
  // Compare encodings: all of these only set the condition flags.

  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare with an add/sub-encodable immediate: negative constants
  // are handled by adding the negation, so the encoded immediate is always
  // non-negative.  zr as destination makes subsw/addsw a pure flag-setter.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare with an arbitrary immediate: materialize it in rscratch1
  // first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare with a 12-bit add/sub immediate.  val != -val is false
  // only for 0 and Long.MIN_VALUE; since this branch is only reached for
  // val < 0, the else arm is exactly the MIN_VALUE case, whose negation
  // cannot be represented.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare with an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full-width).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Null test of a pointer (compare against zr).
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Null test of a narrow oop.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4691 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; $cmp$$cmpcode carries the ADL-encoded condition.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; identical emission, the operand class
  // (cmpOpU) maps Java unsigned comparisons to the right condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
4709 
  // Slow-path subtype check (walks the secondary supers list).  On a hit,
  // check_klass_subtype_slow_path falls through with condition codes set;
  // on a miss it branches to `miss`.  $primary selects a variant that also
  // zeroes the result register on success -- presumably used by the
  // PartialSubtypeCheck-vs-zero rule; confirm against the instruct rules
  // that reference this encoding.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4727 
  // Emit a Java static (or opt-virtual) call via a trampoline.  When there
  // is no resolved _method the target is a runtime wrapper; otherwise a
  // to-interpreter stub is also emitted for lazy resolution.  Either the
  // trampoline or the stub can fail when the code cache is full, in which
  // case the compile is bailed out via record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4754 
  // Emit a Java dynamic (inline-cache) call; bails out the compile if the
  // code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4764 
  // Post-call epilogue: the VerifyStackAtCalls check is not implemented on
  // AArch64 (call_Unimplemented traps), so this emits nothing in normal
  // builds.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4772 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: a trampoline call reaches it.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is a native entry point: call through blrt, which needs the
      // call signature (gp/fp arg counts and return type) -- presumably for
      // ABI bridging; confirm against MacroAssembler::blrt.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4803 
  // Jump to the rethrow stub (exception re-dispatch).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Plain method return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // TailCall: indirect jump to the target method's entry.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // TailJump: used for forwarding an exception to the caller's handler.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4829 
  // Inline monitor-enter fast path.  Attempts, in order: biased locking,
  // thin-lock CAS of the mark word, recursive stack-lock detection, and
  // finally CAS of an inflated monitor's owner field.  On exit the flags
  // encode the result (EQ = locked, NE = must call the runtime slow path);
  // the EmitSync bits force subsets of this path off for debugging.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this compare sets NE (slow path).
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE atomics: single casal does the compare-and-exchange.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Exclusive-monitor loop: ldaxr / stlxr with retry on spurious failure.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4984 
4985   // TODO
4986   // reimplement this with custom cmpxchgptr code
4987   // which avoids some of the unnecessary branching
4988   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4989     MacroAssembler _masm(&cbuf);
4990     Register oop = as_Register($object$$reg);
4991     Register box = as_Register($box$$reg);
4992     Register disp_hdr = as_Register($tmp$$reg);
4993     Register tmp = as_Register($tmp2$$reg);
4994     Label cont;
4995     Label object_has_monitor;
4996     Label cas_failed;
4997 
4998     assert_different_registers(oop, box, tmp, disp_hdr);
4999 
5000     // Always do locking in runtime.
5001     if (EmitSync & 0x01) {
5002       __ cmp(oop, zr); // Oop can't be 0 here => always false.
5003       return;
5004     }
5005 
5006     if (UseBiasedLocking && !UseOptoBiasInlining) {
5007       __ biased_locking_exit(oop, tmp, cont);
5008     }
5009 
5010     // Find the lock address and load the displaced header from the stack.
5011     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
5012 
5013     // If the displaced header is 0, we have a recursive unlock.
5014     __ cmp(disp_hdr, zr);
5015     __ br(Assembler::EQ, cont);
5016 
5017 
5018     // Handle existing monitor.
5019     if ((EmitSync & 0x02) == 0) {
5020       __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
5021       __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
5022     }
5023 
    // Check if it is still a lightweight lock: this is true if we
5025     // see the stack address of the basicLock in the markOop of the
5026     // object.
5027 
5028       if (UseLSE) {
5029         __ mov(tmp, box);
5030         __ casl(Assembler::xword, tmp, disp_hdr, oop);
5031         __ cmp(tmp, box);
5032       } else {
5033         Label retry_load;
5034         if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
5035           __ prfm(Address(oop), PSTL1STRM);
5036         __ bind(retry_load);
5037         __ ldxr(tmp, oop);
5038         __ cmp(box, tmp);
5039         __ br(Assembler::NE, cas_failed);
5040         // use stlxr to ensure update is immediately visible
5041         __ stlxr(tmp, disp_hdr, oop);
5042         __ cbzw(tmp, cont);
5043         __ b(retry_load);
5044       }
5045 
5046     // __ cmpxchgptr(/*compare_value=*/box,
5047     //               /*exchange_value=*/disp_hdr,
5048     //               /*where=*/oop,
5049     //               /*result=*/tmp,
5050     //               cont,
5051     //               /*cas_failed*/NULL);
5052     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
5053 
5054     __ bind(cas_failed);
5055 
5056     // Handle existing monitor.
5057     if ((EmitSync & 0x02) == 0) {
5058       __ b(cont);
5059 
5060       __ bind(object_has_monitor);
5061       __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
5062       __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
5063       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
5064       __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
5065       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
5066       __ cmp(rscratch1, zr);
5067       __ br(Assembler::NE, cont);
5068 
5069       __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
5070       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
5071       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
5072       __ cmp(rscratch1, zr);
5073       __ cbnz(rscratch1, cont);
5074       // need a release store here
5075       __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
5076       __ stlr(rscratch1, tmp); // rscratch1 is zero
5077     }
5078 
5079     __ bind(cont);
5080     // flag == EQ indicates success
5081     // flag == NE indicates failure
5082   %}
5083 
5084 %}
5085 
5086 //----------FRAME--------------------------------------------------------------
5087 // Definition of frame structure and management information.
5088 //
5089 //  S T A C K   L A Y O U T    Allocators stack-slot number
5090 //                             |   (to get allocators register number
5091 //  G  Owned by    |        |  v    add OptoReg::stack0())
5092 //  r   CALLER     |        |
5093 //  o     |        +--------+      pad to even-align allocators stack-slot
5094 //  w     V        |  pad0  |        numbers; owned by CALLER
5095 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5096 //  h     ^        |   in   |  5
5097 //        |        |  args  |  4   Holes in incoming args owned by SELF
5098 //  |     |        |        |  3
5099 //  |     |        +--------+
5100 //  V     |        | old out|      Empty on Intel, window on Sparc
5101 //        |    old |preserve|      Must be even aligned.
5102 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5103 //        |        |   in   |  3   area for Intel ret address
5104 //     Owned by    |preserve|      Empty on Sparc.
5105 //       SELF      +--------+
5106 //        |        |  pad2  |  2   pad to align old SP
5107 //        |        +--------+  1
5108 //        |        | locks  |  0
5109 //        |        +--------+----> OptoReg::stack0(), even aligned
5110 //        |        |  pad1  | 11   pad to align new SP
5111 //        |        +--------+
5112 //        |        |        | 10
5113 //        |        | spills |  9   spills
5114 //        V        |        |  8   (pad0 slot for callee)
5115 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5116 //        ^        |  out   |  7
5117 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5118 //     Owned by    +--------+
5119 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5120 //        |    new |preserve|      Must be even-aligned.
5121 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5122 //        |        |        |
5123 //
5124 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5125 //         known from SELF's arguments and the Java calling convention.
5126 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
5134 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5135 //         even aligned with pad0 as needed.
5136 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5137 //           (the latter is true on Intel but is it false on AArch64?)
5138 //         region 6-11 is even aligned; it may be padded out more so that
5139 //         the region from SP to FP meets the minimum stack alignment.
5140 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5141 //         alignment.  Region 11, pad1, may be dynamically extended so that
5142 //         SP meets the minimum alignment.
5143 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // First (low) register of the return value, indexed by ideal reg type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // Second (high) register of the return value; OptoReg::Bad marks the
    // single-slot (32 bit) cases.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5247 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
// Default attribute values; individual operands/instructions may override.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5265 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate operands match a constant node whose value satisfies the
// operand's predicate; register operands constrain allocation to a
// register class.

// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5316 
// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any 32 bit constant <= 4 (note: no lower bound on the value)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5447 
// 64 bit constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xffffffff (unsigned 32 bit mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order ones: 2^k - 1 with k <= 62
// (top two bits must be clear)
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order ones: 2^k - 1 with k <= 30
// (top two bits must be clear)
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5499 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset as a long constant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5553 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (element size shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (element size shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (element size shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte access (element size shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte access (element size shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte access (element size shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5634 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// (encodable in the bitmask-immediate form of AND/ORR/EOR)
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5656 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset of last_Java_pc within the thread's frame anchor
// (frame_anchor_offset + last_Java_pc_offset), as a long constant.

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
// (encodable in the bitmask-immediate form of AND/ORR/EOR)
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5743 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// (matches only the VM's safepoint polling page address)
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5825 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern comparison, so -0.0d does not match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant representable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern comparison, so -0.0f does not match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant representable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5886 
// Narrow pointer operands
// Narrow Pointer Immediate (any value)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass pointer immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5917 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike the other register operands this one declares no
// op_cost -- presumably the op_attrib default (1) applies; confirm intended.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5994 
// Fixed-register pointer operands: each pins allocation to one register,
// used where a calling convention or runtime stub requires it.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6078 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6133 
// Fixed-register 32 bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6178 
6179 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6239 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-form) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-form) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed FP/SIMD registers V0..V3 -- for rules (e.g. runtime stub calls)
// that need a double value in a specific vector register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6319 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions.
// Same physical NZCV flags; the distinct operand type tells the matcher
// which condition-code mapping (cmpOpU) to pair it with.
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6358 %}
6359 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (the "// link_reg"
                                       // comment here was a copy-paste
                                       // error from lr_RegP below)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link (return address) register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6401 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER descriptions below, index(0xffffffff) is the
// sentinel meaning "no index register".

// [reg] -- plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, ireg sxtw scale] -- base + sign-extended 32-bit index, scaled.
// The predicate only admits this form when every memory use of the
// address can encode the scale (see size_fits_all_mem_uses).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, lreg lsl scale] -- base + 64-bit index, scaled (same predicate
// restriction as above)
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, ireg sxtw] -- base + sign-extended 32-bit index, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, lreg] -- base + 64-bit index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, #off] -- base + immediate offset. The indOffI4/8/16 and
// indOffL4/8/16 variants below differ only in the immediate operand
// type, which restricts the offset to what the correspondingly-sized
// load/store can encode.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6587 
// Narrow-oop variants of the memory operands above. These match an
// address built from a DecodeN of a compressed pointer; all of them
// require Universe::narrow_oop_shift() == 0 so that the decode is a
// no-op (no shift to apply) and the narrow register can be used as the
// base directly.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}



// AArch64 opto stubs need to write to the pc slot in the thread anchor
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6709 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All five variants address [SP + slot-offset]; base encoding 0x1e is
// the stack pointer (the "// RSP" comments are x86-derived naming --
// TODO confirm against the register encodings earlier in this file).
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6784 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.
//
// The numeric values in the COND_INTER mappings below are the A64
// condition-code encodings (eq=0x0, ne=0x1, hs=0x2, lo=0x3, vs=0x6,
// vc=0x7, hi=0x8, ls=0x9, ge=0xa, lt=0xb, gt=0xc, le=0xd).

// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts to eq/ne tests)

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts to lt/ge tests)

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts to eq/ne/lt/ge tests)

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6916 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I reg) so that 32-bit rules can consume the low half
// of a long register directly, eliding the explicit l2i (movw).
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // trailing ';' added for consistency with every other REG_INTER
  // operand in this file (ADLC accepts both forms)
  interface(REG_INTER);
%}
6929 
// Vector-memory operand classes: addressing modes legal for vector
// load/store of 4, 8 and 16 bytes (offset forms restricted to the
// correspondingly-aligned immediate operands).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6961 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map A53-style stage names onto the generic S0..S3 stages declared in
// pipe_desc below, so pipe_class definitions can use either naming.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6971 
// Pipeline description block for the target architecture
6973 pipeline %{
6974 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6987 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// INS01 means an instruction may issue down either of the two issue
// slots; INS0 restricts issue to slot 0 only.

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
7008 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
// "operand : Sn(read/write)" declares at which stage the operand is
// consumed/produced; the trailing resource lines declare which issue
// slot and functional unit the instruction occupies.

// FP dyadic op, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP conversions between float/double and int/long register files
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP division: issue slot 0 only (INS0)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select: also reads the flags register
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate / load-constant
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
7214 
7215 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
7216 %{
7217   single_instruction;
7218   dst    : S5(write);
7219   src1   : S1(read);
7220   src2   : S1(read);
7221   INS01  : ISS;
7222   NEON_FP : S5;
7223 %}
7224 
7225 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
7226 %{
7227   single_instruction;
7228   dst    : S5(write);
7229   src1   : S1(read);
7230   src2   : S1(read);
7231   INS0   : ISS;
7232   NEON_FP : S5;
7233 %}
7234 
7235 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
7236 %{
7237   single_instruction;
7238   dst    : S5(write);
7239   src1   : S1(read);
7240   src2   : S1(read);
7241   dst    : S1(read);
7242   INS01  : ISS;
7243   NEON_FP : S5;
7244 %}
7245 
7246 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
7247 %{
7248   single_instruction;
7249   dst    : S5(write);
7250   src1   : S1(read);
7251   src2   : S1(read);
7252   dst    : S1(read);
7253   INS0   : ISS;
7254   NEON_FP : S5;
7255 %}
7256 
7257 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
7258 %{
7259   single_instruction;
7260   dst    : S4(write);
7261   src1   : S2(read);
7262   src2   : S2(read);
7263   INS01  : ISS;
7264   NEON_FP : S4;
7265 %}
7266 
7267 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
7268 %{
7269   single_instruction;
7270   dst    : S4(write);
7271   src1   : S2(read);
7272   src2   : S2(read);
7273   INS0   : ISS;
7274   NEON_FP : S4;
7275 %}
7276 
7277 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
7278 %{
7279   single_instruction;
7280   dst    : S3(write);
7281   src1   : S2(read);
7282   src2   : S2(read);
7283   INS01  : ISS;
7284   NEON_FP : S3;
7285 %}
7286 
7287 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
7288 %{
7289   single_instruction;
7290   dst    : S3(write);
7291   src1   : S2(read);
7292   src2   : S2(read);
7293   INS0   : ISS;
7294   NEON_FP : S3;
7295 %}
7296 
7297 pipe_class vshift64(vecD dst, vecD src, vecX shift)
7298 %{
7299   single_instruction;
7300   dst    : S3(write);
7301   src    : S1(read);
7302   shift  : S1(read);
7303   INS01  : ISS;
7304   NEON_FP : S3;
7305 %}
7306 
7307 pipe_class vshift128(vecX dst, vecX src, vecX shift)
7308 %{
7309   single_instruction;
7310   dst    : S3(write);
7311   src    : S1(read);
7312   shift  : S1(read);
7313   INS0   : ISS;
7314   NEON_FP : S3;
7315 %}
7316 
7317 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
7318 %{
7319   single_instruction;
7320   dst    : S3(write);
7321   src    : S1(read);
7322   INS01  : ISS;
7323   NEON_FP : S3;
7324 %}
7325 
7326 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
7327 %{
7328   single_instruction;
7329   dst    : S3(write);
7330   src    : S1(read);
7331   INS0   : ISS;
7332   NEON_FP : S3;
7333 %}
7334 
// 64-bit vector FP dyadic operation (two sources, one destination).
// Sources read at S1, result available at S5; dual issue in either slot.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP dyadic operation; issue restricted to slot 0.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide.
// NOTE(review): unlike the other 64-bit classes this one uses INS0
// (slot-0 only) — presumably because of the shared divide unit; confirm.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide; issue restricted to slot 0.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root; issue restricted to slot 0.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP unary operation (eg. negate, absolute value).
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP unary operation; issue restricted to slot 0.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7401 
// Duplicate a general-purpose register into all lanes of a 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general-purpose register into all lanes of a 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes of a 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes of a 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into both lanes of a 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Move an immediate into a 64-bit vector (no register sources).
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Move an immediate into a 128-bit vector; issue restricted to slot 0.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7462 
// 64-bit vector load: address consumed at issue, result written at S5.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store: address consumed at issue, data read at S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector store.
// NOTE(review): src is declared vecD although the name says 128-bit
// (compare vload_reg_mem128, which uses vecX) — confirm intentional.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7498 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand needed earlier than src1
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7596 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);  // flags produced in EX2
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);   // flags consumed in EX1
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7661 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7740 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-offset addressing)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);   // store data not needed until EX2
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-offset addressing)
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);   // 'dst' is the index register — it is read, not written
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7808 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads flags)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7837 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
7896 
// Define the class for the Nop node: nops take no pipeline resources.
define %{
   MachNop = pipe_class_empty;
%}
7901 
7902 %}
7903 //----------INSTRUCTIONS-------------------------------------------------------
7904 //
7905 // match      -- States which machine-independent subtree may be replaced
7906 //               by this instruction.
7907 // ins_cost   -- The estimated cost of this instruction is used by instruction
7908 //               selection to identify a minimum cost tree of machine
7909 //               instructions that matches a tree of machine-independent
7910 //               instructions.
7911 // format     -- A string providing the disassembly for this instruction.
7912 //               The value of an instruction's operand may be inserted
7913 //               by referring to it with a '$' prefix.
7914 // opcode     -- Three instruction opcodes may be provided.  These are referred
7915 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7917 //               indicate the type of machine instruction, while secondary
7918 //               and tertiary are often used for prefix options or addressing
7919 //               modes.
7920 // ins_encode -- A list of encode classes with parameters. The encode class
7921 //               name must have been defined in an 'enc_class' specification
7922 //               in the encode section of the architecture description.
7923 
7924 // ============================================================================
7925 // Memory (Load/Store) Instructions
7926 
7927 // Load Instructions
7928 
// Plain (non-acquiring) loads.  Each rule is predicated on
// !needs_acquiring_load(n) so that volatile loads are matched by the
// ldar-based rules in the "volatile loads and stores" section below.

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// The load node is input 1 of the ConvI2L, hence n->in(1) below.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8040 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long (sign-extending ldrsw)
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches (AndL (ConvI2L (LoadI ...)) 0xFFFFFFFF): ldrw zero-extends,
// so the mask is absorbed into the load.  The load node is two levels
// down (AndL -> ConvI2L -> LoadI), hence in(1)->in(1).
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8082 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  // Plain loads only; volatile longs use the ldar form below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Annotation fixed: this is a 64-bit load, not an int load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8096 
// Load Range (array length)
// No acquiring-load predicate: array lengths are immutable, so a plain
// load is always sufficient.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8165 
// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8193 
8194 
// Load Int Constant (materialize a 32-bit immediate)
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant (materialize a 64-bit immediate)
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
8220 
// Load Pointer Constant
// Cost reflects the multi-instruction mov sequence for a full pointer.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8250 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Annotation fixed: this loads the constant-one pointer, not NULL
  // (the "# NULL ptr" text was copied from loadConP0 above).
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8264 
// Load Poll Page Constant (address of the safepoint polling page)

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card table base)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8334 
// Load Packed Float Constant
// Immediates representable in fmov's 8-bit packed form avoid a
// constant-table load.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case: load from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
8365 
// Load Packed Double Constant
// Immediates representable in fmov's 8-bit packed form avoid a
// constant-table load.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8378 
// Load Double Constant (general case: load from the constant table)

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Annotation fixed: this loads a double (text was copied from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8395 
// Store Instructions

// Store CMS card-mark Immediate
// Used when the preceding StoreStore barrier can be elided.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
8428 
// Store Byte
// Plain (non-releasing) store; volatile stores are matched elsewhere,
// hence the !needs_releasing_store(n) predicate.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8442 
8443 
8444 instruct storeimmB0(immI0 zero, memory mem)
8445 %{
8446   match(Set mem (StoreB mem zero));
8447   predicate(!needs_releasing_store(n));
8448 
8449   ins_cost(INSN_COST);
8450   format %{ "strb rscractch2, $mem\t# byte" %}
8451 
8452   ins_encode(aarch64_enc_strb0(mem));
8453 
8454   ins_pipe(istore_mem);
8455 %}
8456 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero immediate (stores zr)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero immediate (stores zr)
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8511 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Annotation fixed: this is a 64-bit store, not an int store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long zero immediate (stores zr)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Annotation fixed: this is a 64-bit store, not an int store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8539 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer (stores zr)
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed null: when both narrow bases are NULL, rheapbase
// holds zero, so it can be stored directly as the compressed NULL.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8596 
// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8641 
8642 // TODO
8643 // implement storeImmD0 and storeDImmPacked
8644 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Prefetch for allocation: hint that the line will soon be written,
// keeping it in L1 (PSTL1KEEP = prefetch for store, L1, temporal).
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8658 
//  ---------------- volatile loads and stores ----------------
//
// These rules use AArch64 load-acquire (ldar*) instructions, which give
// the acquire half of the volatile memory-ordering contract without a
// separate dmb.  They address through a plain `indirect` operand since
// ldar* only supports a base-register addressing mode.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
// Same ldarsb as above: it sign-extends to the full register, so the
// ConvI2L is absorbed for free.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
// ldarb zero-extends, so the ConvI2L is absorbed for free.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8750 
// Load Short/Char (16 bit signed) into long
// The encoding emits ldarsh (load-acquire, sign-extending halfword);
// the format previously said "ldarh", the zero-extending form, which
// produced a misleading disassembly comment.  Fixed to match the
// encoding.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8763 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends the 32-bit value, so the AndL with the 0xffffffff
// mask is absorbed for free.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8789 
// Load Long (64 bit signed)
// Format comment corrected from "# int" to "# long": this rule emits a
// 64-bit ldar for a LoadL node.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8802 
// Load Pointer (64-bit, load-acquire)
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer (32-bit narrow oop, load-acquire)
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float (via acquire load into an FP register; see the
// aarch64_enc_fldars encoding for how the transfer is done)
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double (acquire form; see aarch64_enc_fldard)
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8854 
// Store Byte (store-release stlrb provides the release half of the
// volatile memory-ordering contract without a separate dmb)
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short (store-release)
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8880 
// Store Integer (store-release)
// Whitespace fixed: `Set mem (StoreI ...)` — the missing space after
// `mem` was inconsistent with every other match() rule in this file.
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8894 
// Store Long (64 bit signed)
// Format comment corrected from "# int" to "# long": this rule emits a
// 64-bit stlr for a StoreL node.
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8907 
// Store Pointer (64-bit, store-release)
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer (32-bit narrow oop, store-release)
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float (release form; see aarch64_enc_fstlrs)
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double (release form; see aarch64_enc_fstlrd)
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

//  ---------------- end of volatile loads and stores ----------------
8964 
8965 // ============================================================================
8966 // BSWAP Instructions
8967 
8968 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8969   match(Set dst (ReverseBytesI src));
8970 
8971   ins_cost(INSN_COST);
8972   format %{ "revw  $dst, $src" %}
8973 
8974   ins_encode %{
8975     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8976   %}
8977 
8978   ins_pipe(ialu_reg);
8979 %}
8980 
8981 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8982   match(Set dst (ReverseBytesL src));
8983 
8984   ins_cost(INSN_COST);
8985   format %{ "rev  $dst, $src" %}
8986 
8987   ins_encode %{
8988     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8989   %}
8990 
8991   ins_pipe(ialu_reg);
8992 %}
8993 
8994 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8995   match(Set dst (ReverseBytesUS src));
8996 
8997   ins_cost(INSN_COST);
8998   format %{ "rev16w  $dst, $src" %}
8999 
9000   ins_encode %{
9001     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
9002   %}
9003 
9004   ins_pipe(ialu_reg);
9005 %}
9006 
9007 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
9008   match(Set dst (ReverseBytesS src));
9009 
9010   ins_cost(INSN_COST);
9011   format %{ "rev16w  $dst, $src\n\t"
9012             "sbfmw $dst, $dst, #0, #15" %}
9013 
9014   ins_encode %{
9015     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
9016     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
9017   %}
9018 
9019   ins_pipe(ialu_reg);
9020 %}
9021 
9022 // ============================================================================
9023 // Zero Count Instructions
9024 
9025 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
9026   match(Set dst (CountLeadingZerosI src));
9027 
9028   ins_cost(INSN_COST);
9029   format %{ "clzw  $dst, $src" %}
9030   ins_encode %{
9031     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
9032   %}
9033 
9034   ins_pipe(ialu_reg);
9035 %}
9036 
9037 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
9038   match(Set dst (CountLeadingZerosL src));
9039 
9040   ins_cost(INSN_COST);
9041   format %{ "clz   $dst, $src" %}
9042   ins_encode %{
9043     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
9044   %}
9045 
9046   ins_pipe(ialu_reg);
9047 %}
9048 
9049 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
9050   match(Set dst (CountTrailingZerosI src));
9051 
9052   ins_cost(INSN_COST * 2);
9053   format %{ "rbitw  $dst, $src\n\t"
9054             "clzw   $dst, $dst" %}
9055   ins_encode %{
9056     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
9057     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
9058   %}
9059 
9060   ins_pipe(ialu_reg);
9061 %}
9062 
9063 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
9064   match(Set dst (CountTrailingZerosL src));
9065 
9066   ins_cost(INSN_COST * 2);
9067   format %{ "rbit   $dst, $src\n\t"
9068             "clz    $dst, $dst" %}
9069   ins_encode %{
9070     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
9071     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
9072   %}
9073 
9074   ins_pipe(ialu_reg);
9075 %}
9076 
//---------- Population Count Instructions -------------------------------------
//
// AArch64 has no scalar popcount; the value is moved to a SIMD register,
// counted per byte with cnt, and the byte counts summed with addv.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand variant: load straight into the SIMD register, saving
// the GPR round-trip.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand variant of popCountL.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9166 
9167 // ============================================================================
9168 // MemBar Instruction
9169 
9170 instruct load_fence() %{
9171   match(LoadFence);
9172   ins_cost(VOLATILE_REF_COST);
9173 
9174   format %{ "load_fence" %}
9175 
9176   ins_encode %{
9177     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
9178   %}
9179   ins_pipe(pipe_serial);
9180 %}
9181 
9182 instruct unnecessary_membar_acquire() %{
9183   predicate(unnecessary_acquire(n));
9184   match(MemBarAcquire);
9185   ins_cost(0);
9186 
9187   format %{ "membar_acquire (elided)" %}
9188 
9189   ins_encode %{
9190     __ block_comment("membar_acquire (elided)");
9191   %}
9192 
9193   ins_pipe(pipe_class_empty);
9194 %}
9195 
9196 instruct membar_acquire() %{
9197   match(MemBarAcquire);
9198   ins_cost(VOLATILE_REF_COST);
9199 
9200   format %{ "membar_acquire\n\t"
9201             "dmb ish" %}
9202 
9203   ins_encode %{
9204     __ block_comment("membar_acquire");
9205     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
9206   %}
9207 
9208   ins_pipe(pipe_serial);
9209 %}
9210 
9211 
9212 instruct membar_acquire_lock() %{
9213   match(MemBarAcquireLock);
9214   ins_cost(VOLATILE_REF_COST);
9215 
9216   format %{ "membar_acquire_lock (elided)" %}
9217 
9218   ins_encode %{
9219     __ block_comment("membar_acquire_lock (elided)");
9220   %}
9221 
9222   ins_pipe(pipe_serial);
9223 %}
9224 
9225 instruct store_fence() %{
9226   match(StoreFence);
9227   ins_cost(VOLATILE_REF_COST);
9228 
9229   format %{ "store_fence" %}
9230 
9231   ins_encode %{
9232     __ membar(Assembler::LoadStore|Assembler::StoreStore);
9233   %}
9234   ins_pipe(pipe_serial);
9235 %}
9236 
9237 instruct unnecessary_membar_release() %{
9238   predicate(unnecessary_release(n));
9239   match(MemBarRelease);
9240   ins_cost(0);
9241 
9242   format %{ "membar_release (elided)" %}
9243 
9244   ins_encode %{
9245     __ block_comment("membar_release (elided)");
9246   %}
9247   ins_pipe(pipe_serial);
9248 %}
9249 
9250 instruct membar_release() %{
9251   match(MemBarRelease);
9252   ins_cost(VOLATILE_REF_COST);
9253 
9254   format %{ "membar_release\n\t"
9255             "dmb ish" %}
9256 
9257   ins_encode %{
9258     __ block_comment("membar_release");
9259     __ membar(Assembler::LoadStore|Assembler::StoreStore);
9260   %}
9261   ins_pipe(pipe_serial);
9262 %}
9263 
9264 instruct membar_storestore() %{
9265   match(MemBarStoreStore);
9266   ins_cost(VOLATILE_REF_COST);
9267 
9268   format %{ "MEMBAR-store-store" %}
9269 
9270   ins_encode %{
9271     __ membar(Assembler::StoreStore);
9272   %}
9273   ins_pipe(pipe_serial);
9274 %}
9275 
9276 instruct membar_release_lock() %{
9277   match(MemBarReleaseLock);
9278   ins_cost(VOLATILE_REF_COST);
9279 
9280   format %{ "membar_release_lock (elided)" %}
9281 
9282   ins_encode %{
9283     __ block_comment("membar_release_lock (elided)");
9284   %}
9285 
9286   ins_pipe(pipe_serial);
9287 %}
9288 
9289 instruct unnecessary_membar_volatile() %{
9290   predicate(unnecessary_volatile(n));
9291   match(MemBarVolatile);
9292   ins_cost(0);
9293 
9294   format %{ "membar_volatile (elided)" %}
9295 
9296   ins_encode %{
9297     __ block_comment("membar_volatile (elided)");
9298   %}
9299 
9300   ins_pipe(pipe_serial);
9301 %}
9302 
9303 instruct membar_volatile() %{
9304   match(MemBarVolatile);
9305   ins_cost(VOLATILE_REF_COST*100);
9306 
9307   format %{ "membar_volatile\n\t"
9308              "dmb ish"%}
9309 
9310   ins_encode %{
9311     __ block_comment("membar_volatile");
9312     __ membar(Assembler::StoreLoad);
9313   %}
9314 
9315   ins_pipe(pipe_serial);
9316 %}
9317 
9318 // ============================================================================
9319 // Cast/Convert Instructions
9320 
9321 instruct castX2P(iRegPNoSp dst, iRegL src) %{
9322   match(Set dst (CastX2P src));
9323 
9324   ins_cost(INSN_COST);
9325   format %{ "mov $dst, $src\t# long -> ptr" %}
9326 
9327   ins_encode %{
9328     if ($dst$$reg != $src$$reg) {
9329       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
9330     }
9331   %}
9332 
9333   ins_pipe(ialu_reg);
9334 %}
9335 
9336 instruct castP2X(iRegLNoSp dst, iRegP src) %{
9337   match(Set dst (CastP2X src));
9338 
9339   ins_cost(INSN_COST);
9340   format %{ "mov $dst, $src\t# ptr -> long" %}
9341 
9342   ins_encode %{
9343     if ($dst$$reg != $src$$reg) {
9344       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
9345     }
9346   %}
9347 
9348   ins_pipe(ialu_reg);
9349 %}
9350 
9351 // Convert oop into int for vectors alignment masking
9352 instruct convP2I(iRegINoSp dst, iRegP src) %{
9353   match(Set dst (ConvL2I (CastP2X src)));
9354 
9355   ins_cost(INSN_COST);
9356   format %{ "movw $dst, $src\t# ptr -> int" %}
9357   ins_encode %{
9358     __ movw($dst$$Register, $src$$Register);
9359   %}
9360 
9361   ins_pipe(ialu_reg);
9362 %}
9363 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// Format fixed: it previously read "mov dst, $src" — the missing '$'
// made the literal text "dst" appear in disassembly, and "mov" did not
// match the movw the encoding emits.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9379 
9380 
// Convert oop pointer into compressed form
// (general case: source may be null, so encode_heap_oop must test it,
// which clobbers the flags — hence KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-non-null variant: no null test needed.
// NOTE(review): cr is an operand but there is no effect(KILL cr) here —
// confirm encode_heap_oop_not_null leaves flags untouched, otherwise the
// unused operand should be dropped.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// General decode: source may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-non-null (or constant) variant of the decode.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9434 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The macro assembler has a distinct in-place form for the case
    // where source and destination registers coincide.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9472 
// The Cast* nodes below exist only to carry type information for the
// optimizer; they match dst onto itself and emit no code (size(0)).

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9503 
9504 // ============================================================================
9505 // Atomic operation instructions
9506 //
9507 // Intel and SPARC both implement Ideal Node LoadPLocked and
9508 // Store{PIL}Conditional instructions using a normal load for the
9509 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9510 //
9511 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9512 // pair to lock object allocations from Eden space when not using
9513 // TLABs.
9514 //
9515 // There does not appear to be a Load{IL}Locked Ideal Node and the
9516 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9517 // and to use StoreIConditional only for 32-bit and StoreLConditional
9518 // only for 64-bit.
9519 //
9520 // We implement LoadPLocked and StorePLocked instructions using,
9521 // respectively the AArch64 hw load-exclusive and store-conditional
9522 // instructions. Whereas we must implement each of
9523 // Store{IL}Conditional using a CAS which employs a pair of
9524 // instructions comprising a load-exclusive followed by a
9525 // store-conditional.
9526 
9527 
9528 // Locked-load (linked load) of the current heap-top
9529 // used when updating the eden heap top
9530 // implemented using ldaxr on AArch64
9531 
9532 instruct loadPLocked(iRegPNoSp dst, indirect mem)
9533 %{
9534   match(Set dst (LoadPLocked mem));
9535 
9536   ins_cost(VOLATILE_REF_COST);
9537 
9538   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
9539 
9540   ins_encode(aarch64_enc_ldaxr(dst, mem));
9541 
9542   ins_pipe(pipe_serial);
9543 %}
9544 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.
// Pairs with the ldaxr emitted by loadPLocked above; stlxr writes its
// success/failure status into rscratch1, which the cmpw translates
// into flags for the ideal graph.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9569 
9570 
9571 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9572 // when attempting to rebias a lock towards the current thread.  We
9573 // must use the acquire form of cmpxchg in order to guarantee acquire
9574 // semantics in this case.
9575 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
9576 %{
9577   match(Set cr (StoreLConditional mem (Binary oldval newval)));
9578 
9579   ins_cost(VOLATILE_REF_COST);
9580 
9581   format %{
9582     "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
9583     "cmpw rscratch1, zr\t# EQ on successful write"
9584   %}
9585 
9586   ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));
9587 
9588   ins_pipe(pipe_slow);
9589 %}
9590 
9591 // storeIConditional also has acquire semantics, for no better reason
9592 // than matching storeLConditional.  At the time of writing this
9593 // comment storeIConditional was not used anywhere by AArch64.
9594 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
9595 %{
9596   match(Set cr (StoreIConditional mem (Binary oldval newval)));
9597 
9598   ins_cost(VOLATILE_REF_COST);
9599 
9600   format %{
9601     "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
9602     "cmpw rscratch1, zr\t# EQ on successful write"
9603   %}
9604 
9605   ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));
9606 
9607   ins_pipe(pipe_slow);
9608 %}
9609 
9610 // standard CompareAndSwapX when we are using barriers
9611 // these have higher priority than the rules selected by a predicate
9612 
9613 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9614 // can't match them
9615 
9616 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
9617 
9618   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
9619   ins_cost(2 * VOLATILE_REF_COST);
9620 
9621   effect(KILL cr);
9622 
9623   format %{
9624     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
9625     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9626   %}
9627 
9628   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
9629             aarch64_enc_cset_eq(res));
9630 
9631   ins_pipe(pipe_slow);
9632 %}
9633 
9634 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
9635 
9636   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
9637   ins_cost(2 * VOLATILE_REF_COST);
9638 
9639   effect(KILL cr);
9640 
9641   format %{
9642     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
9643     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9644   %}
9645 
9646   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
9647             aarch64_enc_cset_eq(res));
9648 
9649   ins_pipe(pipe_slow);
9650 %}
9651 
9652 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
9653 
9654   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
9655   ins_cost(2 * VOLATILE_REF_COST);
9656 
9657   effect(KILL cr);
9658 
9659  format %{
9660     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
9661     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9662  %}
9663 
9664  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
9665             aarch64_enc_cset_eq(res));
9666 
9667   ins_pipe(pipe_slow);
9668 %}
9669 
// Strong CAS on a 64-bit long field: aarch64_enc_cmpxchg does the
// exchange, aarch64_enc_cset_eq materializes success (1/0) in $res.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9687 
// Strong CAS on a 64-bit pointer field; shares the 64-bit encoder
// (aarch64_enc_cmpxchg) with compareAndSwapL.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9705 
// Strong CAS on a 32-bit narrow oop; shares the 32-bit encoder
// (aarch64_enc_cmpxchgw) with compareAndSwapI.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9723 
9724 // alternative CompareAndSwapX when we are eliding barriers
9725 
// Acquiring variant of compareAndSwapI, selected by the predicate when
// barrier elision requires the CAS itself to carry acquire semantics.
// Cheaper ins_cost than the plain rule so the matcher prefers it when
// the predicate holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9744 
// Acquiring variant of compareAndSwapL (see compareAndSwapIAcq for the
// predicate rationale).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9763 
// Acquiring variant of compareAndSwapP (see compareAndSwapIAcq for the
// predicate rationale).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9782 
// Acquiring variant of compareAndSwapN (see compareAndSwapIAcq for the
// predicate rationale).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9801 
9802 
9803 // ---------------------------------------------------------------------
9804 
9805 
9806 // BEGIN This section of the file is automatically generated. Do not edit --------------
9807 
9808 // Sundry CAS operations.  Note that release is always true,
9809 // regardless of the memory ordering of the CAS.  This is because we
9810 // need the volatile case to be sequentially consistent but there is
9811 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9812 // can't check the type of memory ordering here, so we always emit a
9813 // STLXR.
9814 
9815 // This section is generated from aarch64_ad_cas.m4
9816 
9817 
9818 
// CompareAndExchangeB: strong CAS (/*weak*/ false) on a byte that
// returns the value found in memory, not a success flag.
// NOTE(review): format previously said "weak" — contradicted the
// encoding; keep this text in sync with aarch64_ad_cas.m4.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // Zero-extend oldval into rscratch2 for the byte compare, then
    // sign-extend the fetched byte into $res for the int-typed result.
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9835 
// CompareAndExchangeS: strong CAS (/*weak*/ false) on a halfword that
// returns the value found in memory, not a success flag.
// NOTE(review): format previously said "weak" — contradicted the
// encoding; keep this text in sync with aarch64_ad_cas.m4.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // Zero-extend oldval into rscratch2 for the halfword compare, then
    // sign-extend the fetched halfword into $res.
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9852 
// CompareAndExchangeI: strong CAS (/*weak*/ false) on a 32-bit int,
// returning the value found in memory.
// NOTE(review): format previously said "weak" — contradicted the
// encoding; keep this text in sync with aarch64_ad_cas.m4.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9867 
// CompareAndExchangeL: strong CAS (/*weak*/ false) on a 64-bit long,
// returning the value found in memory.
// NOTE(review): format previously said "weak" — contradicted the
// encoding; keep this text in sync with aarch64_ad_cas.m4.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9882 
// CompareAndExchangeN: strong CAS (/*weak*/ false) on a 32-bit narrow
// oop, returning the value found in memory.
// NOTE(review): format previously said "weak" — contradicted the
// encoding; keep this text in sync with aarch64_ad_cas.m4.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9897 
// CompareAndExchangeP: strong CAS (/*weak*/ false) on a 64-bit
// pointer, returning the value found in memory.
// NOTE(review): format previously said "weak" — contradicted the
// encoding; keep this text in sync with aarch64_ad_cas.m4.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9912 
// Weak CAS on a byte: /*weak*/ true permits spurious failure.  The
// fetched value is discarded (noreg); success is read from the flags
// with CSETW.  oldval is zero-extended into rscratch2 for the compare.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9930 
// Weak CAS on a halfword; see weakCompareAndSwapB for the pattern.
// oldval is zero-extended into rscratch2 before the compare.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9948 
// Weak CAS on a 32-bit int: fetched value discarded (noreg), success
// flag materialized into $res with CSETW.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9965 
// Weak CAS on a 64-bit long.  Note $res is an int register: it holds
// only the 0/1 success flag, not the long value.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9982 
// Weak CAS on a 32-bit narrow oop; see weakCompareAndSwapI.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9999 
// Weak CAS on a 64-bit pointer; $res holds only the 0/1 success flag.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
10016 
10017 // END This section of the file is automatically generated. Do not edit --------------
10018 // ---------------------------------------------------------------------
10019 
// Atomic 32-bit exchange: store $newv at [$mem], leaving the result of
// the exchange (see MacroAssembler::atomic_xchgw) in $prev.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10028 
// Atomic 64-bit exchange; see get_and_setI.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10037 
// Atomic exchange of a 32-bit narrow oop; uses the word-sized
// atomic_xchgw like get_and_setI.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10046 
// Atomic exchange of a 64-bit pointer; see get_and_setL.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10055 
10056 
// Atomic 64-bit add of a register increment; atomic_add deposits its
// result in $newval (the GetAndAddL node's value — see
// MacroAssembler::atomic_add for which value that is).
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10066 
// As get_and_addL but the fetched value is unused (predicate), so it
// is discarded (noreg).  Slightly lower ins_cost makes the matcher
// prefer this rule when it applies.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10077 
// Atomic 64-bit add of an immediate increment (immLAddSub keeps the
// constant in add/sub-immediate range).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10087 
// Immediate-increment form with the fetched value unused; see
// get_and_addL_no_res.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10098 
// Atomic 32-bit add of a register increment (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10108 
// 32-bit form with the fetched value unused; see get_and_addL_no_res.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10119 
// Atomic 32-bit add of an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10129 
// 32-bit immediate form with the fetched value unused; see
// get_and_addL_no_res.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10140 
10141 // Manifest a CmpL result in an integer register.
10142 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // CMP sets the flags; CSETW writes 0 (equal) or 1 (not equal) to
  // $dst; CNEGW then negates $dst when LT, yielding -1/0/+1.
  // (A stale commented-out alternative format was removed here.)
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10163 
// Immediate variant of cmpL3_reg_reg: compares against a constant that
// (per the immLAddSub operand) fits an add/sub immediate, avoiding a
// constant load.  Yields -1/0/+1 in $dst via CSETW + CNEGW.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant is compared by adding its negation; a
    // non-negative one by subtracting.  Only the flags are needed, so
    // the result goes to zr.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10188 
10189 // ============================================================================
10190 // Conditional Move Instructions
10191 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10201 
// Conditional move (int, signed compare): CSELW picks $src2 when the
// condition $cmp holds, otherwise $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10217 
// Unsigned-compare twin of cmovI_reg_reg (see the note above on why
// cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10233 
10234 // special cases where one arg is zero
10235 
10236 // n.b. this is selected in preference to the rule above because it
10237 // avoids loading constant 0 into a source register
10238 
10239 // TODO
10240 // we ought only to be able to cull one of these variants as the ideal
10241 // transforms ought always to order the zero consistently (to left/right?)
10242 
// Zero-on-the-left special case: uses zr instead of materializing 0
// into a source register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10258 
// Unsigned-compare twin of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10274 
// Zero-on-the-right special case: zr supplies the selected-when-true
// value.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10290 
// Unsigned-compare twin of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10306 
10307 // special case for creating a boolean 0 or 1
10308 
10309 // n.b. this is selected in preference to the rule above because it
10310 // avoids loading constants 0 and 1 into a source register
10311 
// Boolean materialization: CSINCW $dst, zr, zr, cond gives 0 when the
// condition holds and zr+1 == 1 otherwise — no source registers needed.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10330 
// Unsigned-compare twin of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10349 
// Conditional move (long, signed compare): 64-bit CSEL picks $src2
// when $cmp holds, else $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10365 
// Unsigned-compare twin of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10381 
10382 // special cases where one arg is zero
10383 
// Zero-on-the-right special case for long: zr supplies the
// selected-when-true value.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10399 
// Unsigned-compare twin of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10415 
// Zero-on-the-left special case for long.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10431 
// Unsigned-compare twin of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10447 
// Conditional move (pointer, signed compare); same CSEL shape as
// cmovL_reg_reg.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10463 
// Unsigned-compare twin of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10479 
10480 // special cases where one arg is zero
10481 
// Null-pointer-on-the-right special case: zr supplies the
// selected-when-true value.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10497 
// Unsigned-compare twin of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10513 
// Null-pointer-on-the-left special case.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10529 
// Unsigned-compare twin of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10545 
// Conditional move of a compressed (narrow) oop, signed flags.  Uses the
// 32-bit cselw since narrow oops occupy the low word of the register.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10561 
// As cmovN_reg_reg, but for unsigned comparisons (cmpOpU/rFlagsRegU).
// Fix: the format string previously said "# signed" although this is the
// unsigned variant; only the PrintOptoAssembly text changes, not codegen.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10577 
10578 // special cases where one arg is zero
10579 
// Compressed-oop conditional move with constant-zero arm:
// "cselw dst, zr, src, cond", i.e. dst = cond ? 0 : src (signed flags).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10595 
// As cmovN_reg_zero, but for unsigned comparisons (cmpOpU/rFlagsRegU).
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10611 
// Mirror of cmovN_reg_zero with zero in the other arm:
// "cselw dst, src, zr, cond", i.e. dst = cond ? src : 0 (signed flags).
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10627 
// As cmovN_zero_reg, but for unsigned comparisons (cmpOpU/rFlagsRegU).
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10643 
// Conditional move of a float via FCSEL (single precision), signed flags.
// Note src2 is the "true" operand of fcsels: dst = cond ? src2 : src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10661 
// As cmovF_reg, but for unsigned comparisons (cmpOpU/rFlagsRegU).
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10679 
// Conditional move of a double via FCSEL (double precision), signed flags.
// Fix: the format comment previously said "cmove float" although this rule
// matches CMoveD and emits fcseld; text-only change, codegen unchanged.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10697 
// As cmovD_reg, but for unsigned comparisons (cmpOpU/rFlagsRegU).
// Fix: format comment said "cmove float" for a double (fcseld) rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10715 
10716 // ============================================================================
10717 // Arithmetic Instructions
10718 //
10719 
10720 // Integer Addition
10721 
10722 // TODO
10723 // these currently employ operations which do not set CR and hence are
10724 // not flagged as killing CR but we would like to isolate the cases
10725 // where we want to set flags from those where we don't. need to work
10726 // out how to do that.
10727 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10742 
// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10756 
// As addI_reg_imm, but absorbs a ConvL2I on src1: the 32-bit addw reads
// only the low word, so no explicit narrowing instruction is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10770 
10771 // Pointer Addition
// Pointer add: base pointer + 64-bit offset register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10786 
// Pointer add absorbing an int->long conversion of the offset using the
// sign-extending (sxtw) register form of add.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10801 
// Pointer add absorbing a left-shifted (scaled) 64-bit index, emitted as a
// scaled-register-offset lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10816 
// Pointer add absorbing both an int->long conversion and a scale on the
// index, via the sxtw-and-shift addressing form.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10831 
// Fused int->long convert + left shift, implemented as a single sbfiz
// (sign-extending bitfield insert).  The width operand is capped at 32
// since only 32 significant bits come from the int source.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10846 
10847 // Pointer Immediate Addition
10848 // n.b. this needs to be more expensive than using an indirect memory
10849 // operand
// Pointer add of an add/sub-encodable immediate offset.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10863 
10864 // Long Addition
// 64-bit integer add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10880 
// Long Immediate Addition.  No constant pool entries required.
// 64-bit integer add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10895 
10896 // Integer Subtraction
// 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10911 
10912 // Immediate Subtraction
// 32-bit integer subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10926 
10927 // Long Subtraction
// 64-bit integer subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10943 
// Long Immediate Subtraction.  No constant pool entries required.
// 64-bit integer subtract, register - add/sub-encodable immediate.
// Fix: format string was "sub$dst" (missing separator), inconsistent with
// addL_reg_imm; text-only change, codegen unchanged.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10958 
10959 // Integer Negation (special case for sub)
10960 
// 32-bit negate: matches (0 - src) and emits negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10974 
10975 // Long Negation
10976 
// 64-bit negate: matches (0 - src) and emits neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10990 
10991 // Integer Multiply
10992 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
11007 
// Widening 32x32->64 signed multiply: folds the two ConvI2L nodes into a
// single smull instead of two sign-extends plus a 64-bit mul.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
11022 
11023 // Long Multiply
11024 
// 64-bit integer multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11039 
// High 64 bits of a signed 64x64 multiply (MulHiL), via smulh.
// Fix: format string had a stray ", " before the tab ("$src2, \t# mulhi");
// text-only change, codegen unchanged.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11055 
11056 // Combined Integer Multiply & Add/Sub
11057 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2, via maddw.
// Fix: format said "madd" (64-bit mnemonic) although the encoder emits the
// 32-bit maddw; text-only change, codegen unchanged.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11073 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2, via msubw.
// Fix: format said "msub" although the encoder emits the 32-bit msubw;
// text-only change, codegen unchanged.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11089 
11090 // Combined Long Multiply & Add/Sub
11091 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11107 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11123 
11124 // Integer Divide
11125 
// 32-bit signed divide (encoding class handles the corner cases).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11135 
// (x >> 31) >>> 31 collapses to a single logical shift right by 31,
// extracting the sign bit (a pattern produced by divide-by-power-of-2).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
11145 
// src + sign-bit(src) in one shifted-register addw — the rounding step of a
// signed divide-by-2 idiom.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
11159 
11160 // Long Divide
11161 
// 64-bit signed divide (encoding class handles the corner cases).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11171 
// 64-bit analogue of signExtract: (x >> 63) >>> 63 becomes lsr by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11181 
// 64-bit analogue of div2Round: src + sign-bit(src) via shifted-register add.
// Fix: format omitted the "LSR" annotation that the emitted instruction
// (add ..., Assembler::LSR, 63) has and that sibling div2Round prints;
// text-only change, codegen unchanged.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11195 
11196 // Integer Remainder
11197 
// 32-bit remainder: no hardware rem, so sdivw then msubw reconstructs
// dst = src1 - (src1 / src2) * src2.
// Fix: second format line was garbled ("msubw($dst, ..." — stray paren,
// not valid assembly text); text-only change, codegen unchanged.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11208 
11209 // Long Remainder
11210 
// 64-bit remainder: sdiv then msub, as for modI.
// Fix: garbled second format line ("msub($dst, ...") repaired, and "\n"
// changed to "\n\t" for continuation-line alignment consistent with modI;
// text-only change, codegen unchanged.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11221 
11222 // Integer Shifts
11223 
11224 // Shift Left Register
// 32-bit shift left by a register amount (lslvw masks the count mod 32 in
// hardware, matching Java shift semantics).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11239 
11240 // Shift Left Immediate
// 32-bit shift left by an immediate; count is masked to 0..31 to match
// Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11255 
11256 // Shift Right Logical Register
// 32-bit unsigned (logical) shift right by a register amount.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11271 
11272 // Shift Right Logical Immediate
// 32-bit unsigned (logical) shift right by an immediate, count masked 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11287 
11288 // Shift Right Arithmetic Register
// 32-bit arithmetic (sign-propagating) shift right by a register amount.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11303 
11304 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate, count masked 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11319 
11320 // Combined Int Mask and Right Shift (using UBFM)
11321 // TODO
11322 
11323 // Long Shifts
11324 
11325 // Shift Left Register
// 64-bit shift left by a register amount.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11340 
11341 // Shift Left Immediate
// 64-bit shift left by an immediate, count masked to 0..63.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11356 
11357 // Shift Right Logical Register
// 64-bit unsigned (logical) shift right by a register amount.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11372 
11373 // Shift Right Logical Immediate
// 64-bit unsigned (logical) shift right by an immediate, count masked 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11388 
11389 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: logical shift right of a
// pointer reinterpreted as a long (CastP2X), e.g. addr >> card_shift.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11404 
11405 // Shift Right Arithmetic Register
// 64-bit arithmetic (sign-propagating) shift right by a register amount.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11420 
11421 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate, count masked 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11436 
11437 // BEGIN This section of the file is automatically generated. Do not edit --------------
11438 
// NOTE(review): generated section (see BEGIN marker above) — comments here
// would be lost on regeneration.  ~x matched as (x ^ -1), emitted as eon
// with zr: dst = ~(src1 ^ 0) = ~src1.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit analogue of regL_not_reg: ~x as eonw with zr (generated section).
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11471 
// src1 & ~src2 fused into a single bicw (generated section).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11488 
// src1 & ~src2 fused into a single bic, 64-bit (generated section).
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11505 
// src1 | ~src2 fused into a single ornw (generated section).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11522 
// src1 | ~src2 fused into a single orn, 64-bit (generated section).
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11539 
// ~(src1 ^ src2), matched as -1 ^ (src2 ^ src1), fused into eonw
// (generated section).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11556 
// ~(src1 ^ src2) fused into eon, 64-bit (generated section).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11573 
// dst = src1 & ~(src2 >>> src3) (32-bit): XorI with src4 == -1 is bitwise NOT;
// NOT and the unsigned shift fold into one BICW with an LSR-shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11591 
// dst = src1 & ~(src2 >>> src3) (64-bit): XorL with src4 == -1L is bitwise NOT;
// NOT and the unsigned shift fold into one BIC with an LSR-shifted operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11609 
// dst = src1 & ~(src2 >> src3) (32-bit, arithmetic shift): folds NOT and the
// signed shift into one BICW with an ASR-shifted operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11627 
// dst = src1 & ~(src2 >> src3) (64-bit, arithmetic shift): folds NOT and the
// signed shift into one BIC with an ASR-shifted operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11645 
// dst = src1 & ~(src2 << src3) (32-bit): folds NOT and the left shift into
// one BICW with an LSL-shifted operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11663 
// dst = src1 & ~(src2 << src3) (64-bit): folds NOT and the left shift into
// one BIC with an LSL-shifted operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11681 
// dst = ~(src1 ^ (src2 >>> src3)) (32-bit): the outer XorI with src4 == -1
// negates the result, matching EONW with an LSR-shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11699 
// dst = ~(src1 ^ (src2 >>> src3)) (64-bit): the outer XorL with src4 == -1L
// negates the result, matching EON with an LSR-shifted operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11717 
// dst = ~(src1 ^ (src2 >> src3)) (32-bit, arithmetic shift): matches EONW
// with an ASR-shifted operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11735 
// dst = ~(src1 ^ (src2 >> src3)) (64-bit, arithmetic shift): matches EON
// with an ASR-shifted operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11753 
// dst = ~(src1 ^ (src2 << src3)) (32-bit): matches EONW with an LSL-shifted
// operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11771 
// dst = ~(src1 ^ (src2 << src3)) (64-bit): matches EON with an LSL-shifted
// operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11789 
// dst = src1 | ~(src2 >>> src3) (32-bit): folds NOT and the unsigned shift
// into one ORNW with an LSR-shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11807 
// dst = src1 | ~(src2 >>> src3) (64-bit): folds NOT and the unsigned shift
// into one ORN with an LSR-shifted operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11825 
// dst = src1 | ~(src2 >> src3) (32-bit, arithmetic shift): ORNW with an
// ASR-shifted operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11843 
// dst = src1 | ~(src2 >> src3) (64-bit, arithmetic shift): ORN with an
// ASR-shifted operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11861 
// dst = src1 | ~(src2 << src3) (32-bit): ORNW with an LSL-shifted operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11879 
// dst = src1 | ~(src2 << src3) (64-bit): ORN with an LSL-shifted operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11897 
// dst = src1 & (src2 >>> src3) (32-bit): AND with an LSR-shifted operand,
// folding the shift into the logical instruction.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11916 
// dst = src1 & (src2 >>> src3) (64-bit): AND with an LSR-shifted operand.
// (andr is the assembler's name for the 64-bit AND register form.)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11935 
// dst = src1 & (src2 >> src3) (32-bit, arithmetic shift): AND with an
// ASR-shifted operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11954 
// dst = src1 & (src2 >> src3) (64-bit, arithmetic shift): AND with an
// ASR-shifted operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11973 
// dst = src1 & (src2 << src3) (32-bit): AND with an LSL-shifted operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11992 
// dst = src1 & (src2 << src3) (64-bit): AND with an LSL-shifted operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12011 
// dst = src1 ^ (src2 >>> src3) (32-bit): EOR with an LSR-shifted operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12030 
// dst = src1 ^ (src2 >>> src3) (64-bit): EOR with an LSR-shifted operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12049 
// dst = src1 ^ (src2 >> src3) (32-bit, arithmetic shift): EOR with an
// ASR-shifted operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12068 
// dst = src1 ^ (src2 >> src3) (64-bit, arithmetic shift): EOR with an
// ASR-shifted operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12087 
// dst = src1 ^ (src2 << src3) (32-bit): EOR with an LSL-shifted operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12106 
// dst = src1 ^ (src2 << src3) (64-bit): EOR with an LSL-shifted operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12125 
// dst = src1 | (src2 >>> src3) (32-bit): ORR with an LSR-shifted operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12144 
// dst = src1 | (src2 >>> src3) (64-bit): ORR with an LSR-shifted operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12163 
// dst = src1 | (src2 >> src3) (32-bit, arithmetic shift): ORR with an
// ASR-shifted operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12182 
// dst = src1 | (src2 >> src3) (64-bit, arithmetic shift): ORR with an
// ASR-shifted operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12201 
// dst = src1 | (src2 << src3) (32-bit): ORR with an LSL-shifted operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12220 
// dst = src1 | (src2 << src3) (64-bit): ORR with an LSL-shifted operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12239 
// dst = src1 + (src2 >>> src3) (32-bit): ADD with an LSR-shifted operand,
// folding the shift into the arithmetic instruction.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12258 
// dst = src1 + (src2 >>> src3) (64-bit): ADD with an LSR-shifted operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12277 
// dst = src1 + (src2 >> src3) (32-bit, arithmetic shift): ADD with an
// ASR-shifted operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12296 
// dst = src1 + (src2 >> src3) (64-bit, arithmetic shift): ADD with an
// ASR-shifted operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12315 
// dst = src1 + (src2 << src3) (32-bit): ADD with an LSL-shifted operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12334 
// dst = src1 + (src2 << src3) (64-bit): ADD with an LSL-shifted operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12353 
// dst = src1 - (src2 >>> src3) (32-bit): SUB with an LSR-shifted operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12372 
// dst = src1 - (src2 >>> src3) (64-bit): SUB with an LSR-shifted operand.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12391 
// dst = src1 - (src2 >> src3) (32-bit, arithmetic shift): SUB with an
// ASR-shifted operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12410 
// dst = src1 - (src2 >> src3) (64-bit, arithmetic shift): SUB with an
// ASR-shifted operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12429 
// dst = src1 - (src2 << src3) (32-bit): SUB with an LSL-shifted operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);   // shift count masked to 0..31 for 32-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12448 
// dst = src1 - (src2 << src3) (64-bit): SUB with an LSL-shifted operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);   // shift count masked to 0..63 for 64-bit ops
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12467 
12468 
12469 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) is a signed bitfield move; it is
// implemented as one SBFM with immr = (rshift - lshift) & 63 and
// imms = 63 - lshift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;             // top bit of the extracted field
    int r = (rshift - lshift) & 63;  // rotate amount selecting the field
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12492 
12493 // Shift Left followed by Shift Right.
12494 // This idiom is used by the compiler for the i2b bytecode etc.
12495 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
12496 %{
12497   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
12498   // Make sure we are not going to exceed what sbfmw can do.
12499   predicate((unsigned int)n->in(2)->get_int() <= 31
12500             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
12501 
12502   ins_cost(INSN_COST * 2);
12503   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
12504   ins_encode %{
12505     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
12506     int s = 31 - lshift;
12507     int r = (rshift - lshift) & 31;
12508     __ sbfmw(as_Register($dst$$reg),
12509             as_Register($src$$reg),
12510             r, s);
12511   %}
12512 
12513   ins_pipe(ialu_reg_shift);
12514 %}
12515 
12516 // Shift Left followed by Shift Right.
12517 // This idiom is used by the compiler for the i2b bytecode etc.
12518 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
12519 %{
12520   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
12521   // Make sure we are not going to exceed what ubfm can do.
12522   predicate((unsigned int)n->in(2)->get_int() <= 63
12523             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
12524 
12525   ins_cost(INSN_COST * 2);
12526   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
12527   ins_encode %{
12528     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
12529     int s = 63 - lshift;
12530     int r = (rshift - lshift) & 63;
12531     __ ubfm(as_Register($dst$$reg),
12532             as_Register($src$$reg),
12533             r, s);
12534   %}
12535 
12536   ins_pipe(ialu_reg_shift);
12537 %}
12538 
12539 // Shift Left followed by Shift Right.
12540 // This idiom is used by the compiler for the i2b bytecode etc.
12541 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
12542 %{
12543   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
12544   // Make sure we are not going to exceed what ubfmw can do.
12545   predicate((unsigned int)n->in(2)->get_int() <= 31
12546             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
12547 
12548   ins_cost(INSN_COST * 2);
12549   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
12550   ins_encode %{
12551     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
12552     int s = 31 - lshift;
12553     int r = (rshift - lshift) & 31;
12554     __ ubfmw(as_Register($dst$$reg),
12555             as_Register($src$$reg),
12556             r, s);
12557   %}
12558 
12559   ins_pipe(ialu_reg_shift);
12560 %}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask maps onto an unsigned bitfield extract:
// ubfxw dst, src, #rshift, #width, where width = log2(mask + 1).
// immI_bitmask guarantees mask + 1 is a power of two, so width is exact.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of ubfxwI above (immL_bitmask guarantees a contiguous
// low-order mask).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, so the ConvI2L
// comes for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12611 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift maps onto unsigned bitfield insert-in-zero:
// ubfizw dst, src, #lshift, #width with width = log2(mask + 1).
// The predicate ensures the inserted field stays within 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI above; field must stay within 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// ubfiz zero-extends the inserted field, so the intervening ConvI2L is free.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12669 
// Rotations

// 64-bit funnel shift: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 0 (mod 64) maps onto a single extr instruction.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Only applies when the shift counts are complementary mod 64.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12686 
// 32-bit funnel shift: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 0 (mod 32) maps onto a single extrw instruction.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Only applies when the shift counts are complementary mod 32.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Report the 32-bit mnemonic: the encoding emits extrw, but the format
  // previously printed the 64-bit "extr", giving misleading debug output.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12701 
// Same as extrOrL but with AddL combining the two shifted halves; when the
// shifted fields do not overlap (counts complementary mod 64), add and or
// produce the same bits, so extr applies here too.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12716 
// Same as extrOrI but with AddI combining the two shifted halves; when the
// shifted fields do not overlap (counts complementary mod 32), add and or
// produce the same bits, so extrw applies here too.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Report the 32-bit mnemonic: the encoding emits extrw, but the format
  // previously printed the 64-bit "extr", giving misleading debug output.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12731 
12732 
// rol expander
// AArch64 has no rotate-left-by-register instruction; rotate left by N is
// implemented as rotate right by -N: negate the shift count into rscratch1
// (subw from zr) and use rorv.  Match-less instruct used only via expand.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of rolL_rReg above (rorvw).
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12764 
// Rotate-left idioms produced by the parser/ideal graph:
// (src << shift) | (src >>> (C - shift)) with C == 64 or C == 0 (shift
// counts are taken mod 64, so both constants describe the same rotate).
// Each variant simply expands to the rolL_rReg/rolI_rReg pseudo above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idioms (C == 32 or C == 0, counts taken mod 32).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12800 
// ror expander
// Rotate right by register maps directly onto rorv; no negation needed
// (contrast with the rol expanders above).  Used only via expand.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant of rorL_rReg above (rorvw).
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12830 
// Rotate-right idioms, mirror images of the rol variants above:
// (src >>> shift) | (src << (C - shift)) with C == 64 or C == 0.
// Each variant expands to the rorL_rReg/rorI_rReg pseudo.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idioms (C == 32 or C == 0).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12866 
// Add/subtract (extended)

// 64-bit add of a sign-extended int: the ConvI2L folds into the add's
// extended-register form (sxtw), avoiding a separate extend instruction.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// 64-bit subtract of a sign-extended int, mirror of AddExtI above.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12894 
12895 
// 32-bit add with a narrowing extend of the second operand.  The ideal
// graph expresses sign/zero extension of a sub-word value as a matched
// left/right shift pair (e.g. <<16 then >>16 for a short); these rules
// fold that pair into the add's extend operand (sxth/sxtb/uxtb).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// <<24 then signed >>24: sign-extend a byte into the add.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// <<24 then unsigned >>>24: zero-extend a byte into the add.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12934 
// 64-bit counterparts of the rules above: the shift-pair extend idiom
// (e.g. <<48 then >>48 for a short) folds into the 64-bit add's extend
// operand (sxth/sxtw/sxtb/uxtb).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// <<32 then signed >>32: sign-extend the low word into the add.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// <<56 then signed >>56: sign-extend a byte into the add.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// <<56 then unsigned >>>56: zero-extend a byte into the add.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12986 
12987 
// Add with zero-extension expressed as an AND mask: src2 & 0xff / 0xffff /
// 0xffffffff is the ideal-graph form of uxtb/uxth/uxtw, and folds into the
// add's extend operand, eliminating the separate mask instruction.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffff: zero-extend a half-word into the 32-bit add.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xff: zero-extend a byte into the 64-bit add.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffff: zero-extend a half-word into the 64-bit add.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffffffff: zero-extend a word into the 64-bit add.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13052 
// Subtract counterparts of the AddExt*_and rules above: the AND-mask
// zero-extension of src2 folds into the subtract's extend operand.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffff: zero-extend a half-word into the 32-bit subtract.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xff: zero-extend a byte into the 64-bit subtract.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffff: zero-extend a half-word into the 64-bit subtract.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffffffff: zero-extend a word into the 64-bit subtract.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13117 
13118 
// Extended add/subtract with an extra left shift: the shift-pair extend
// idiom (sign-extend of a byte/short/word) followed by a further left
// shift by lshift2 folds into a single extended-register add/sub with a
// shifted extend.  immIExt bounds lshift2 to what the extend-shift field
// of the instruction can encode.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Sign-extend a short (<<48 >>48), then shift left by lshift2, then add.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Sign-extend a word (<<32 >>32), then shift left by lshift2, then add.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract counterparts of the three rules above.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13196 
// 32-bit counterparts of the extended add/sub-with-shift rules above:
// sign-extend a byte (<<24 >>24) or short (<<16 >>16), shift left by
// lshift2, and fold the whole thing into one addw/subw with shifted extend.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract counterparts of the two rules above.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13248 
13249 
// 64-bit add/sub of a sign-extended int that is then left-shifted:
// (long)src2 << lshift folds into an extended-register add/sub with
// sxtw and an extend shift (lshift bounded by immIExt).
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// Subtract counterpart of AddExtI_shift above.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
13275 
13276 
// 64-bit add/sub where the second operand is zero-extended via an AND mask
// (0xff/0xffff/0xffffffff) and then left-shifted: both the mask and the
// shift fold into one extended-register add/sub (uxtb/uxth/uxtw with an
// extend shift bounded by immIExt).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract counterparts of the three rules above.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13354 
// NOTE(review): still inside the machine-generated section; comments here
// may be lost on regeneration.

// 32-bit counterparts of the AddExtL rules: AddI of
// ((src2 & 0xff) << lshift) becomes a single uxtb extended-register addw.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 16-bit mask variant: uxth extended-register addw.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// SubI variant: fold mask-and-shift of the subtrahend into a uxtb
// extended-register subw.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 16-bit mask variant: uxth extended-register subw.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13406 // END This section of the file is automatically generated. Do not edit --------------
13407 
13408 // ============================================================================
13409 // Floating Point Arithmetic Instructions
13410 
// Single-precision FP add: fadds Sd, Sn, Sm.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: faddd Dd, Dn, Dm.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: fsubs Sd, Sn, Sm.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: fsubd Dd, Dn, Dm.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13470 
// Single-precision FP multiply: fmuls Sd, Sn, Sm.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: fmuld Dd, Dn, Dm.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13500 
// src1 * src2 + src3
// Fused multiply-add (single): dst = src3 + src1 * src2 via fmadds.
// Only matched when -XX:+UseFMA, since FMA rounding differs from
// separate multiply-then-add.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Fused multiply-add (double): dst = src3 + src1 * src2 via fmaddd.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13534 
// -src1 * src2 + src3
// Fused multiply-subtract (single): dst = src3 - src1 * src2 via fmsubs.
// Two match rules cover the negation appearing on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Fused multiply-subtract (double): dst = src3 - src1 * src2 via fmsubd.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13570 
// -src1 * src2 - src3
// Negated fused multiply-add (single): dst = -src3 - src1 * src2 via
// fnmadds; again both negation placements on the product are matched.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Negated fused multiply-add (double): dst = -src3 - src1 * src2.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13606 
// src1 * src2 - src3
// Fused form of dst = src1 * src2 - src3, matched from the FMA node with a
// negated addend.
// NOTE(review): the `zero` operand is not referenced by the match rule or
// the encoding — presumably left over from an earlier pattern; confirm
// before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// Double variant; same unused-`zero` note as above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
  // (the assembler entry point for the double form is spelled fnmsub)
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13641 
13642 
// Single-precision FP divide: fdivs Sd, Sn, Sm. High cost reflects the
// long latency of hardware divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: fdivd Dd, Dn, Dm (even longer latency).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13672 
// Single-precision FP negate via fnegs (the format prints the generic
// "fneg" mnemonic).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP negate: fnegd Dd, Dn.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision FP absolute value: fabss Sd, Sn.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value: fabsd Dd, Dn.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13726 
// Double-precision square root: fsqrtd Dd, Dn. High cost reflects the
// long latency of hardware sqrt.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_s: fsqrtd is a double-precision op, so schedule it on the
  // double divide/sqrt pipe like divD_reg_reg (pipe classes only affect
  // scheduling, not the emitted code).
  ins_pipe(fp_div_d);
%}
13739 
// Single-precision square root. The ideal graph only has SqrtD, so the
// float form is matched as float->double, sqrt, double->float, which
// fsqrts computes directly.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_d: fsqrts is a single-precision op, so schedule it on the
  // single divide/sqrt pipe like divF_reg_reg (pipe classes only affect
  // scheduling, not the emitted code).
  ins_pipe(fp_div_s);
%}
13752 
13753 // ============================================================================
13754 // Logical Instructions
13755 
13756 // Integer Logical Instructions
13757 
13758 // And Instructions
13759 
13760 
// 32-bit bitwise AND, register-register: andw Wd, Wn, Wm.
// NOTE(review): $cr is declared but has no effect() clause and the
// encoding does not set flags — presumably vestigial; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13775 
// 32-bit bitwise AND with a logical immediate (immILog guarantees the
// constant is encodable as an AArch64 bitmask immediate).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: the format previously printed "andsw" (the flag-setting form)
  // although the encoding emits a plain andw, matching andI_reg_reg.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13790 
13791 // Or Instructions
13792 
// 32-bit bitwise OR, register-register: orrw Wd, Wn, Wm.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical (bitmask-encodable) immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR, register-register: eorw Wd, Wn, Wm.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical (bitmask-encodable) immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13854 
13855 // Long Logical Instructions
13856 // TODO
13857 
// 64-bit bitwise AND, register-register: and Xd, Xn, Xm.
// (Formats below fixed to say "# long" — they previously said "# int"
// although these are all 64-bit operations.)
// NOTE(review): $cr is declared but has no effect() clause and the
// encoding does not set flags — presumably vestigial; confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise AND with a logical (bitmask-encodable) immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// 64-bit bitwise OR, register-register: orr Xd, Xn, Xm.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise OR with a logical (bitmask-encodable) immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 64-bit bitwise XOR, register-register: eor Xd, Xn, Xm.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise XOR with a logical (bitmask-encodable) immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13951 
// Sign-extend int to long: sbfm Xd, Xn, #0, #31 is the canonical
// encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13963 
// this pattern (int zero-extended to long) occurs in big-number arithmetic
// such as java.math.BigInteger/BigDecimal
// Zero-extend int to long: the (AndL (ConvI2L src) 0xffffffff) shape is
// matched directly so the mask costs nothing; ubfm #0, #31 is uxtw.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13977 
// Truncate long to int: a 32-bit movw copies the low word (upper 32 bits
// of the destination X register are cleared by the W-form write).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13990 
// Int to boolean (0/1): compare against the zero register and cset on NE,
// so dst = (src != 0) ? 1 : 0. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer to boolean (0/1): 64-bit compare against zr, then cset on NE,
// so dst = (src != NULL) ? 1 : 0. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14026 
// Narrow double to float: fcvtd (FCVT D->S precision conversion).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Widen float to double: fcvts (FCVT S->D precision conversion).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float to int: fcvtzsw — signed convert with round-toward-zero into a
// 32-bit GP register (Java (int) cast semantics).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to long: fcvtzs into a 64-bit GP register.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14078 
// Int to float: scvtfws — signed 32-bit GP source to single-precision.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long to float: scvtfs — signed 64-bit GP source to single-precision.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double to int: fcvtzdw — round-toward-zero into a 32-bit GP register.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double to long: fcvtzd — round-toward-zero into a 64-bit GP register.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int to double: scvtfwd — signed 32-bit GP source to double-precision.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long to double: scvtfd — signed 64-bit GP source to double-precision.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14156 
14157 // stack <-> reg and reg <-> reg shuffles with no conversion
14158 
// Bit-for-bit load of a float stack slot into an int GP register
// (Float.floatToRawIntBits-style move, no conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Bit-for-bit load of an int stack slot into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit load of a double stack slot into a long GP register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Bit-for-bit load of a long stack slot into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14230 
// Bit-for-bit store of a float register into an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit store of an int GP register into a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14266 
// Bit-for-bit store of a double register into a long stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: the format printed "$dst, $src"; a store lists the source
  // register first, as the encoding does and as the sibling
  // Move*_reg_stack rules print.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14284 
// Bit-for-bit store of a long GP register into a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14302 
// Register-to-register bit moves between FP and GP files via fmov
// (no memory round trip, no value conversion).

// float reg -> int reg: fmov Wd, Sn.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int reg -> float reg: fmov Sd, Wn.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double reg -> long reg: fmov Xd, Dn.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long reg -> double reg: fmov Dd, Xn.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14374 
14375 // ============================================================================
14376 // clearing of an array
14377 
// Zero an array with a runtime count. Pinned to r10 (base) and r11 (cnt)
// because MacroAssembler::zero_words clobbers them (hence USE_KILL).
// NOTE(review): cnt appears to be a word count, matching the
// LogBytesPerWord scaling in the constant-count rule below — confirm
// against MacroAssembler::zero_words.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Zero an array with a compile-time constant count, only when the count
// is below the BlockZeroingLowLimit threshold (scaled from bytes to
// words); larger constant counts fall back to the register rule above.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14409 
14410 // ============================================================================
14411 // Overflow Math Instructions
14412 
// Int add-overflow check, reg + reg: cmnw computes op1 + op2 and sets
// the flags; the consumer branches/selects on the V (overflow) bit.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14425 
// Int add-overflow check, reg + add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14438 
// Long add-overflow check, reg + reg (64-bit cmn).
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14451 
// Long add-overflow check, reg + add/sub-encodable immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14464 
// Int subtract-overflow check, reg - reg: cmpw sets flags; consumer
// tests the V (overflow) bit.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14477 
// Int subtract-overflow check, reg - add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14490 
// Long subtract-overflow check, reg - reg (64-bit cmp).
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14503 
// Long subtract-overflow check, reg - add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14516 
// Int negate-overflow check (OverflowSubI with zero on the left):
// compare zr against op1 so the flags reflect 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14529 
// Long negate-overflow check: flags reflect 0 - op1 (64-bit).
// NOTE(review): the zero operand is declared immI0 although the match
// is on OverflowSubL — presumably intentional since only the constant
// value is matched; confirm against the matcher rules.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14542 
// Int multiply-overflow check producing a flags result.  smull forms
// the full 64-bit product; if its high half is not the sign extension
// of the low half the multiply overflowed (NE after the subs).  The
// movw/cselw/cmpw sequence then converts that NE/EQ outcome into the
// V flag (0x80000000 - 1 overflows; 0 - 1 does not) so a generic
// overflow consumer can test VS/VC.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14563 
// Fused int multiply-overflow check + branch.  Avoids materializing the
// V flag: after the smull/subs pair, NE directly means "overflowed", so
// branch on NE for an overflow test (VS) and EQ for no_overflow (VC).
// The predicate restricts this match to exactly those two bool tests.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14585 
// Long multiply-overflow check producing a flags result.  mul/smulh
// form the 128-bit product; the product fits in 64 bits iff the high
// 64 bits equal the sign extension (ASR #63) of the low 64 bits.  The
// trailing movw/cselw/cmpw converts the NE/EQ outcome into the V flag,
// as in overflowMulI_reg above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14608 
// Fused long multiply-overflow check + branch; the NE outcome of the
// high-half comparison directly encodes "overflowed", so branch NE for
// VS and EQ for VC.  Restricted by predicate to overflow/no_overflow
// bool tests only.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14632 
14633 // ============================================================================
14634 // Compare Instructions
14635 
// Signed int compare, reg vs reg.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14649 
// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14663 
// Signed int compare against an add/sub-encodable immediate (single insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14677 
// Signed int compare against a general immediate; costs more because
// the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14691 
14692 // Unsigned compare Instructions; really, same as signed compare
14693 // except it should only be used to feed an If or a CMovI which takes a
14694 // cmpOpU.
14695 
// Unsigned int compare, reg vs reg; same cmpw, but the result feeds an
// unsigned flags register (rFlagsRegU) so consumers use cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14709 
// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14723 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14737 
// Unsigned int compare against a general immediate (constant may need
// materializing, hence double cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14751 
// Signed long compare, reg vs reg (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14765 
// Signed long compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14779 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14793 
// Signed long compare against a general immediate (constant may need
// materializing, hence double cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14807 
// Unsigned long compare, reg vs reg; result feeds rFlagsRegU consumers.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14821 
// Unsigned long compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
14835 
// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14849 
// Unsigned long compare against a general immediate (constant may need
// materializing, hence double cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14863 
// Pointer compare, reg vs reg (pointers compare unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14877 
// Compressed-pointer (narrow oop) compare, reg vs reg.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14891 
// Pointer null test: compare op1 against the null pointer constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
14905 
// Compressed-pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14919 
14920 // FP comparisons
14921 //
14922 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14923 // using normal cmpOp. See declaration of rFlagsReg for details.
14924 
// Float compare, reg vs reg (fcmps sets the integer flags).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14938 
// Float compare against the constant 0.0 (fcmps immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
14952 // FROM HERE
14953 
// Double compare, reg vs reg (fcmpd sets the integer flags).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14967 
// Double compare against the constant 0.0 (fcmpd immediate-zero form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
14981 
// Three-way float compare: dst = -1 if src1 < src2 (or unordered),
// 0 if equal, +1 if greater.  csinvw yields 0 on EQ else -1; csnegw
// then keeps -1 on LT (less or unordered) else negates it to +1.
// Removed a `Label done` that was declared and bound but never
// branched to; also closed the missing ')' in the format text.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15009 
// Three-way double compare: dst = -1 (less or unordered), 0 (equal),
// or +1 (greater); same csinvw/csnegw idiom as compF3_reg_reg.
// Removed a `Label done` that was declared and bound but never
// branched to; also closed the missing ')' in the format text.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15036 
// Three-way float compare against constant 0.0: dst = -1 (less or
// unordered), 0 (equal), or +1 (greater).
// Removed a `Label done` that was declared and bound but never
// branched to; also closed the missing ')' in the format text.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15063 
// Three-way double compare against constant 0.0: dst = -1 (less or
// unordered), 0 (equal), or +1 (greater).
// Removed a `Label done` that was declared and bound but never
// branched to; also closed the missing ')' in the format text.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15089 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw materializes 0/1 from the
// LT condition and the subw negates it into an all-ones/all-zero mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15110 
// CmpLTMask against zero: dst = (src < 0) ? -1 : 0, computed in one
// instruction by arithmetic-shifting the sign bit across the word.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15126 
15127 // ============================================================================
15128 // Max and Min
15129 
// Signed int minimum: cmpw then conditional select of src1 on LT,
// src2 otherwise.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15154 // FROM HERE
15155 
// Signed int maximum: cmpw then conditional select of src1 on GT,
// src2 otherwise.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15180 
15181 // ============================================================================
15182 // Branch Instructions
15183 
15184 // Direct Branch.
// Direct Branch.
// Unconditional branch to a label (plain `b`).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15198 
15199 // Conditional Near Branch
// Conditional branch on signed flags (rFlagsReg).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15219 
15220 // Conditional Near Branch Unsigned
// Conditional branch on unsigned flags (rFlagsRegU / cmpOpU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15240 
15241 // Make use of CBZ and CBNZ.  These instructions, as well as being
15242 // shorter than (cmp; branch), have the additional benefit of not
15243 // killing the flags.
15244 
// Fuse an int eq/ne compare against zero with the branch into a single
// cbzw/cbnzw, leaving the flags untouched.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15261 
// Fuse a long eq/ne compare against zero with the branch (cbz/cbnz).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15278 
// Fuse a pointer null test with the branch (cbz/cbnz, 64-bit).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15295 
// Fuse a compressed-pointer null test with the branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15312 
// Null test of a decoded narrow oop: since DecodeN of zero is null,
// test the still-encoded 32-bit value directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15329 
// Fuse an unsigned int compare against zero with the branch.  Against
// zero, the only meaningful distinction is ==0 vs !=0: EQ/LS select
// cbzw, the remaining conditions select cbnzw.
// NOTE(review): the operand allows lt/ge cmp codes; confirm the
// matcher only presents conditions that reduce to the eq/ne split
// handled here.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15346 
// Fuse an unsigned long compare against zero with the branch; same
// EQ/LS -> cbz split as cmpUI_imm0_branch, using 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15363 
15364 // Test bit and Branch
15365 
15366 // Patterns for short (< 32KiB) variants
// Sign test of a long fused into a test-bit branch: "op1 < 0" is bit 63
// set (tbnz -> NE here), "op1 >= 0" is bit 63 clear (tbz -> EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15382 
// Sign test of an int fused into a test-bit branch on bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15398 
// Single-bit test of a long: (op1 & power-of-two) ==/!= 0 becomes a
// tbz/tbnz on the bit index recovered with exact_log2.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15415 
// Single-bit test of an int: (op1 & power-of-two) ==/!= 0 as tbz/tbnz.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15432 
15433 // And far variants
// And far variants
// Far variant of the long sign-test branch: tbr with far=true for
// targets outside the +/-32KiB tbz range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15448 
// Far variant of the int sign-test branch (bit 31, far=true).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15463 
// Far variant of cmpL_branch_bit: same single-bit long test, emitted
// with the far form of tbr (no range limit on the branch target).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  // The AndL mask must be a single bit (power of two).
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15479 
// Far variant of cmpI_branch_bit: same single-bit int test, emitted
// with the far form of tbr (no range limit on the branch target).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  // The AndI mask must be a single bit (power of two).
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15495 
15496 // Test bits
15497 
// Set flags from "(op1 & imm) cmp 0" on a long using a single tst,
// avoiding a separate AND; the immediate must be encodable as an
// AArch64 logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15510 
// Int counterpart of cmpL_and: flags from "(op1 & imm) cmp 0" via a
// single 32-bit tstw; immediate must be a valid 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15523 
// Register-register form: flags from "(op1 & op2) cmp 0" on longs via tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15534 
// Register-register form: flags from "(op1 & op2) cmp 0" on ints via tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15545 
15546 
15547 // Conditional Far Branch
15548 // Conditional Far Branch Unsigned
15549 // TODO: fixme
15550 
15551 // counted loop end branch near
// Conditional branch closing a counted loop (signed condition), emitted
// via the shared conditional-branch encoding.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15567 
15568 // counted loop end branch near Unsigned
// Unsigned-condition counterpart of branchLoopEnd.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15584 
15585 // counted loop end branch far
15586 // counted loop end branch far unsigned
15587 // TODO: fixme
15588 
15589 // ============================================================================
15590 // inlined locking and unlocking
15591 
// Inline fast-path monitor enter; sets flags for the caller to test.
// tmp and tmp2 are scratch registers clobbered by the encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15606 
// Inline fast-path monitor exit; mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15619 
15620 
15621 // ============================================================================
15622 // Safepoint Instructions
15623 
15624 // TODO
15625 // provide a near and far version of this code
15626 
// Safepoint poll: a read from the polling page (poll_type relocation)
// that traps when the VM arms the page for a safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15639 
15640 
15641 // ============================================================================
15642 // Procedure Call/Return Instructions
15643 
15644 // Call Java Static Instruction
15645 
// Direct call to a statically-bound Java method, followed by the
// standard call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15661 
15662 // TO HERE
15663 
15664 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline cache), followed by the
// standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15680 
15681 // Call Runtime Instruction
15682 
// Call from compiled Java code into a VM runtime entry point.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15697 
15698 // Call Runtime Instruction
15699 
// Leaf runtime call (no Java-visible side effects); same encoding as
// CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15714 
15715 // Call Runtime Instruction
15716 
// Leaf runtime call that does not use floating point; same encoding as
// the other runtime call rules.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15731 
15732 // Tail Call; Jump from runtime stub to Java code.
15733 // Also known as an 'interprocedural jump'.
15734 // Target of jump will eventually return to caller.
15735 // TailJump below removes the return address.
// Indirect tail call: jump (not call) through jump_target; method_oop
// carries the callee's method oop in its dedicated register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15748 
// Indirect tail jump used for exception forwarding; ex_oop (in R0)
// carries the exception oop to the target.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15761 
15762 // Create exception oop: created by stack-crawling runtime code.
15763 // Created exception is now available to this handler, and is setup
15764 // just prior to jumping to this handler. No code emitted.
15765 // TODO check
15766 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size placeholder: the exception oop is already in R0 when the
// handler is entered, so no instructions are emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15779 
15780 // Rethrow exception: The exception oop will come in the first
15781 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the shared rethrow stub; the exception oop is
// expected in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15792 
15793 
15794 // Return Instruction
15795 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so a plain ret
// suffices.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15806 
15807 // Die now.
// Emit a trapping instruction for code paths that must never execute.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15822 
15823 // ============================================================================
15824 // Partial Subtype Check
15825 //
15826 // superklass array for an instance of the superklass.  Set a hidden
15827 // internal cache on a hit (cache is checked with exposed code in
15828 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15829 // encoding ALSO sets flags.
15830 
// Partial subtype check returning a result register (zeroed on hit via
// opcode 0x1); temp and flags are clobbered by the stub encoding.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15845 
// Variant matched when the check result is only compared against zero:
// just sets flags (opcode 0x0 skips zeroing result on hit), killing the
// result and temp registers as scratch.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15860 
// UTF-16/UTF-16 string comparison (UU encoding).  No vector temps are
// needed for this encoding, so fnoreg is passed for the FP scratch slots.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Kill list reflects the effect clause: both tmp1 and tmp2 are clobbered.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15878 
// Latin-1/Latin-1 string comparison (LL encoding).  No vector temps are
// needed for this encoding, so fnoreg is passed for the FP scratch slots.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Kill list reflects the effect clause: both tmp1 and tmp2 are clobbered.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15895 
// Mixed-encoding comparison, UTF-16 first operand vs Latin-1 second
// (UL); needs three vector temps for the inflation of the Latin-1 side.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15915 
// Mixed-encoding comparison, Latin-1 first operand vs UTF-16 second
// (LU); needs three vector temps for the inflation of the Latin-1 side.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15935 
// indexOf with variable needle length, both strings UTF-16 (UU).
// The -1 constant tells string_indexof that cnt2 is dynamic.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15956 
// indexOf with variable needle length, both strings Latin-1 (LL).
// The -1 constant tells string_indexof that cnt2 is dynamic.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15977 
// indexOf with variable needle length, UTF-16 haystack / Latin-1 needle
// (UL).  The -1 constant tells string_indexof that cnt2 is dynamic.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15998 
// indexOf specialized for a small constant needle length (<= 4), both
// strings UTF-16.  The constant count is passed to string_indexof so it
// can emit a specialized search; zr fills the unused register slots.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16019 
// indexOf specialized for a small constant needle length (<= 4), both
// strings Latin-1.  zr fills the unused register slots.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16040 
// indexOf specialized for a single-char constant needle (immI_1),
// UTF-16 haystack / Latin-1 needle.  zr fills the unused register slots.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16061 
// indexOf of a single char value (StrIndexOfChar) in a UTF-16 string.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16079 
// Latin-1 string equality; the trailing 1 is the element size in bytes.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16095 
// UTF-16 string equality; the trailing 2 is the element size in bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16111 
// byte[] equality (LL encoding); the trailing 1 is the element size in
// bytes.  Mirrors array_equalsC, which handles char[] (size 2).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16128 
// char[] equality (UU encoding); the trailing 2 is the element size in
// bytes.  Mirrors array_equalsB, which handles byte[] (size 1).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16145 
// Scan a byte[] for any negative byte (used by Latin-1 encodability
// checks); delegates entirely to MacroAssembler::has_negatives.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16156 
16157 // fast char[] to byte[] compression
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // NOTE(review): the kill comment lists R4, but the effect clause only
  // kills R1 (dst), R2 (src), R3 (len) plus V0-V3 and cr -- confirm R4.
  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16175 
16176 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // Kill list reflects the effect clause: all four temps are clobbered.
  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16190 
16191 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    // Delegates to the macro-assembler stub; result receives the number
    // of characters actually encoded.  TODO confirm result semantics
    // against MacroAssembler::encode_iso_array.
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16209 
16210 // ============================================================================
16211 // This name is KNOWN by the ADLC and cannot be changed.
16212 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16213 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated
// thread register, so this emits no code (size 0, cost 0).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16228 
16229 // ====================VECTOR INSTRUCTIONS=====================================
16230 
16231 // Load vector (32 bits)
// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  // Selected only for 4-byte vector loads.
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16241 
16242 // Load vector (64 bits)
// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  // Selected only for 8-byte vector loads.
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16252 
16253 // Load Vector (128 bits)
// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  // Selected only for 16-byte vector loads.
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16263 
16264 // Store Vector (32 bits)
// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  // Selected only for 4-byte vector stores.
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16274 
16275 // Store Vector (64 bits)
16276 instruct storeV8(vecD src, vmem8 mem)
16277 %{
16278   predicate(n->as_StoreVector()->memory_size() == 8);
16279   match(Set mem (StoreVector mem src));
16280   ins_cost(4 * INSN_COST);
16281   format %{ "strd   $mem,$src\t# vector (64 bits)" %}
16282   ins_encode( aarch64_enc_strvD(src, mem) );
16283   ins_pipe(vstore_reg_mem64);
16284 %}
16285 
16286 // Store Vector (128 bits)
16287 instruct storeV16(vecX src, vmem16 mem)
16288 %{
16289   predicate(n->as_StoreVector()->memory_size() == 16);
16290   match(Set mem (StoreVector mem src));
16291   ins_cost(4 * INSN_COST);
16292   format %{ "strq   $mem,$src\t# vector (128 bits)" %}
16293   ins_encode( aarch64_enc_strvQ(src, mem) );
16294   ins_pipe(vstore_reg_mem128);
16295 %}
16296 
// Splat a GP register (or immediate) into every lane of a SIMD register.
// The D-sized (64-bit) rules also accept the shorter vector lengths in
// their predicates (e.g. 8B handles length 4, 4S handles length 2).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate splats: the constant is masked down to the lane width
// before being handed to the macro-assembler mov.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// 32-bit lanes need no masking: the immediate already fits the lane.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// NOTE(review): despite the name and the "movi" in the format string, the
// encoding zeroes $dst with eor dst,dst,dst, and the rule matches
// (ReplicateI zero), not ReplicateL — presumably the matcher canonicalizes
// a zero long splat into this shape; confirm against the matcher rules.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Floating-point splats dup from a SIMD/FP source register rather
// than a GP register.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16509 
16510 // ====================REDUCTION ARITHMETIC====================================
16511 
// Add-reduce 2 ints: dst = src1 + src2[0] + src2[1]. Lanes are extracted
// to GP registers with umov and summed with addw.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce 4 ints: addv folds the whole vector in one SIMD instruction,
// then the scalar result is moved out and added to src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce 2 ints: dst = src1 * src2[0] * src2[1], lane by lane
// through a GP temp (no SIMD mul-across instruction exists).
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce 4 ints: ins copies the high 64 bits of src2 down, a 2S
// SIMD multiply pairs the halves, then the two remaining lanes are
// multiplied out through GP registers.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce 2 floats: strictly ordered scalar fadds chain (FP reduction
// must preserve Java's left-to-right evaluation, so no SIMD addv here).
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce 4 floats: same ordered pattern, pulling lanes 1..3 down to
// lane 0 of $tmp with ins before each scalar add.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16644 
// Multiply-reduce 2 floats: dst = src1 * src2[0] * src2[1], as an ordered
// scalar fmuls chain; ins moves lane 1 of src2 down to lane 0 of $tmp.
// Fixed: the format trailer previously read "add reduction4f" — this is a
// 2-element MUL reduction, so the disassembly annotation was misleading.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16664 
// Multiply-reduce 4 floats: dst = src1 * src2[0] * src2[1] * src2[2] * src2[3],
// ordered scalar fmuls chain with ins pulling each lane down to lane 0 of $tmp.
// Fixed: the format trailer previously read "add reduction4f" — this is a
// MUL reduction, so the disassembly annotation was misleading.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16696 
// Add-reduce 2 doubles: dst = src1 + src2[0] + src2[1], as an ordered
// scalar faddd chain; ins moves lane 1 of src2 down to lane 0 of $tmp.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16716 
// Multiply-reduce 2 doubles: dst = src1 * src2[0] * src2[1], as an ordered
// scalar fmuld chain; ins moves lane 1 of src2 down to lane 0 of $tmp.
// Fixed: the format trailer previously read "add reduction2d" — this is a
// MUL reduction, so the disassembly annotation was misleading.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16736 
16737 // ====================VECTOR ARITHMETIC=======================================
16738 
16739 // --------------------------------- ADD --------------------------------------
16740 
// Element-wise vector add. One rule per lane arrangement; D-register
// (64-bit) rules also cover the shorter vector lengths via their predicates.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Floating-point variants use fadd and the FP pipes.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16868 
// Element-wise add of 2 packed doubles.
// Fixed: added the length-2 predicate that every sibling vecX rule carries
// (vsub2D, vmul2D, vadd2L, ...) — it was missing here, leaving this rule
// unguarded against AddVD nodes of other lengths.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16881 
16882 // --------------------------------- SUB --------------------------------------
16883 
// Element-wise vector subtract: dst = src1 - src2, lane by lane.
// Structure mirrors the ADD family above.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Floating-point variants use fsub and the FP pipes.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17025 
17026 // --------------------------------- MUL --------------------------------------
17027 
// Element-wise vector multiply. Note there is no byte (B) or long (2L)
// multiply rule here, and integer forms use the vmul* pipes.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Floating-point variants use fmul and the mul/div FP pipes.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17126 
17127 // --------------------------------- MLA --------------------------------------
17128 
17129 instruct vmla4S(vecD dst, vecD src1, vecD src2)
17130 %{
17131   predicate(n->as_Vector()->length() == 2 ||
17132             n->as_Vector()->length() == 4);
17133   match(Set dst (AddVS dst (MulVS src1 src2)));
17134   ins_cost(INSN_COST);
17135   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
17136   ins_encode %{
17137     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
17138             as_FloatRegister($src1$$reg),
17139             as_FloatRegister($src2$$reg));
17140   %}
17141   ins_pipe(vmla64);
17142 %}
17143 
// Multiply-accumulate of 8 packed shorts: dst += src1 * src2 (8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17157 
// Multiply-accumulate of 2 packed ints: dst += src1 * src2 (2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
17171 
// Multiply-accumulate of 4 packed ints: dst += src1 * src2 (4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17185 
17186 // dst + src1 * src2
// Fused multiply-add of 2 packed floats: dst = dst + src1 * src2.
// Guarded by UseFMA since fmla is a fused (single-rounding) operation.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
17199 
17200 // dst + src1 * src2
// Fused multiply-add of 4 packed floats: dst = dst + src1 * src2 (4S).
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17213 
17214 // dst + src1 * src2
// Fused multiply-add of 2 packed doubles: dst = dst + src1 * src2 (2D).
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17227 
17228 // --------------------------------- MLS --------------------------------------
17229 
// Multiply-subtract of packed shorts: dst -= src1 * src2.
// Length 2 is also accepted; it fits the 4H (D-register) arrangement.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
17244 
// Multiply-subtract of 8 packed shorts: dst -= src1 * src2 (8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17258 
// Multiply-subtract of 2 packed ints: dst -= src1 * src2 (2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
17272 
// Multiply-subtract of 4 packed ints: dst -= src1 * src2 (4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17286 
17287 // dst - src1 * src2
// Fused multiply-subtract of 2 packed floats: dst = dst - src1 * src2.
// Two match rules: the negation may appear on either multiplicand.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
17301 
17302 // dst - src1 * src2
// Fused multiply-subtract of 4 packed floats: dst = dst - src1 * src2 (4S).
// Negation may appear on either multiplicand.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17316 
17317 // dst - src1 * src2
// Fused multiply-subtract of 2 packed doubles: dst = dst - src1 * src2 (2D).
// Negation may appear on either multiplicand.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17331 
17332 // --------------------------------- DIV --------------------------------------
17333 
// Divide 2 packed floats (D register, 2S arrangement).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
17347 
// Divide 4 packed floats (Q register, 4S arrangement).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17361 
// Divide 2 packed doubles (Q register, 2D arrangement).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17375 
17376 // --------------------------------- SQRT -------------------------------------
17377 
// Square root of 2 packed doubles (2D).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17389 
17390 // --------------------------------- ABS --------------------------------------
17391 
// Absolute value of 2 packed floats (2S).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
17404 
// Absolute value of 4 packed floats (4S).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17417 
// Absolute value of 2 packed doubles (2D).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17430 
17431 // --------------------------------- NEG --------------------------------------
17432 
// Negate 2 packed floats (2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
17445 
// Negate 4 packed floats (4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17458 
// Negate 2 packed doubles (2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17471 
17472 // --------------------------------- AND --------------------------------------
17473 
// Bitwise AND of vectors up to 8 bytes wide. Bitwise ops are
// element-size agnostic, so the predicate keys on length_in_bytes;
// a 4-byte vector rides in the low half of the D register.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17488 
// Bitwise AND of 16-byte vectors (16B).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17502 
17503 // --------------------------------- OR ---------------------------------------
17504 
// Bitwise OR of vectors up to 8 bytes wide; a 4-byte vector rides in
// the low half of the D register (see vand8B for the same pattern).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Was "and" — the encoding emits orr, so print orr (matches vor16B).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17519 
// Bitwise OR of 16-byte vectors (16B).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17533 
17534 // --------------------------------- XOR --------------------------------------
17535 
// Bitwise XOR of vectors up to 8 bytes wide (eor); a 4-byte vector
// rides in the low half of the D register.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17550 
// Bitwise XOR of 16-byte vectors (eor, 16B).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17564 
17565 // ------------------------------ Shift ---------------------------------------
17566 
// Broadcast a left-shift count from a GP register into every byte lane
// of a vector register, for use by the variable-shift instructions.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17575 
17576 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Broadcast a right-shift count and negate it: sshl/ushl with a
// negative per-lane count performs the right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17586 
// Variable shift of packed bytes (8B). One rule covers both left and
// signed right shift: the RShiftCntV rule above negated the count, so
// sshl shifts right for negative lane counts.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17601 
// Variable left/signed-right shift of 16 packed bytes (16B);
// right shift uses the negated count from vshiftcntR.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17615 
// Variable unsigned (logical) right shift of packed bytes (8B);
// ushl with the negated count performs the right shift.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17629 
// Variable unsigned right shift of 16 packed bytes (16B).
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17642 
// Immediate left shift of packed bytes (8B).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= element width: shl cannot encode it; the result is
      // all zeros, produced by eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17662 
// Immediate left shift of 16 packed bytes (16B).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= element width yields zero; emit eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17681 
// Immediate arithmetic (signed) right shift of packed bytes (8B).
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift >= width is equivalent to a shift by width-1
    // (lanes saturate to 0 or -1); clamp so sshr can encode it.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17696 
// Immediate arithmetic right shift of 16 packed bytes (16B);
// shift >= 8 clamps to 7 (result saturates to 0 or -1 per lane).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17710 
// Immediate unsigned (logical) right shift of packed bytes (8B).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical shift >= element width yields zero; emit eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17730 
// Immediate unsigned right shift of 16 packed bytes (16B).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical shift >= element width yields zero; emit eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17749 
// Variable left/signed-right shift of packed shorts (4H); length 2
// also fits this D-register arrangement. Right shift relies on the
// negated count from vshiftcntR.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17764 
// Variable left/signed-right shift of 8 packed shorts (8H).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17778 
// Variable unsigned right shift of packed shorts (4H).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17792 
// Variable unsigned right shift of 8 packed shorts (8H).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17805 
// Immediate left shift of packed shorts (4H).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= element width yields zero; eor src,src clears the
      // 64-bit register (8B arrangement — element size is irrelevant).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17825 
// Immediate left shift of 8 packed shorts (8H).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= element width yields zero; clear with eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17844 
// Immediate arithmetic right shift of packed shorts (4H);
// shift >= 16 clamps to 15 (lanes saturate to 0 or -1).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17859 
// Immediate arithmetic right shift of 8 packed shorts (8H);
// shift >= 16 clamps to 15 (lanes saturate to 0 or -1).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17873 
// Immediate unsigned right shift of packed shorts (4H).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical shift >= element width yields zero; clear with eor.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17893 
// Immediate unsigned right shift of 8 packed shorts (8H).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical shift >= element width yields zero; clear with eor.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17912 
// Variable left/signed-right shift of 2 packed ints (2S);
// right shift uses the negated count from vshiftcntR.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17926 
// Variable left/signed-right shift of 4 packed ints (4S).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17940 
// Variable unsigned right shift of 2 packed ints (2S).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17953 
// Variable unsigned right shift of 4 packed ints (4S).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17966 
// Immediate left shift of 2 packed ints (2S). No >=32 clamp is needed
// here: Java masks int shift counts to 0..31 before they reach the ideal
// graph — NOTE(review): assumed from the absence of clamping; confirm.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17979 
// Immediate left shift of 4 packed ints (4S).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17992 
// Immediate arithmetic right shift of 2 packed ints (2S).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18005 
// Immediate arithmetic right shift of 4 packed ints (4S).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18018 
// Immediate unsigned right shift of 2 packed ints (2S).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18031 
// Vector logical (unsigned) right shift of 4 ints by an immediate
// (128-bit vector, 4S lanes), emitted as SIMD USHR.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18044 
// Vector shift of 2 longs by a per-lane register shift amount (2D lanes).
// Both the left-shift (LShiftVL) and signed-right-shift (RShiftVL) ideal
// nodes map onto SSHL, which shifts left for positive element values in
// $shift and right for negative ones.
// NOTE(review): this double match is only correct for RShiftVL if the
// shift-count vector is negated where the right-shift count node is
// matched (that rule is not visible in this chunk) -- confirm; otherwise
// a signed right shift would be emitted as a LEFT shift here. Later
// upstream aarch64.ad versions split RShiftVL into its own rule that
// negates the count (NEGR) before SSHL.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18058 
// Vector logical right shift of 2 longs by a per-lane register shift
// amount (2D lanes). USHL shifts left for positive element values in
// $shift and right for negative ones.
// NOTE(review): emitting USHR semantics via USHL presumes the shift-count
// vector holds negated counts for right shifts (the count-producing rule
// is not visible in this chunk) -- confirm against the RShiftCntV rule.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18071 
// Vector left shift of 2 longs by an immediate (128-bit vector, 2D lanes),
// emitted as SIMD SHL with the constant folded into the encoding.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18084 
// Vector arithmetic (signed) right shift of 2 longs by an immediate
// (128-bit vector, 2D lanes), emitted as SIMD SSHR.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18097 
// Vector logical (unsigned) right shift of 2 longs by an immediate
// (128-bit vector, 2D lanes), emitted as SIMD USHR.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18110 
18111 //----------PEEPHOLE RULES-----------------------------------------------------
18112 // These must follow all instruction definitions as they use the names
18113 // defined in the instructions definitions.
18114 //
18115 // peepmatch ( root_instr_name [preceding_instruction]* );
18116 //
18117 // peepconstraint %{
18118 // (instruction_number.operand_name relational_op instruction_number.operand_name
18119 //  [, ...] );
18120 // // instruction numbers are zero-based using left to right order in peepmatch
18121 //
18122 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18123 // // provide an instruction_number.operand_name for each operand that appears
18124 // // in the replacement instruction's match rule
18125 //
18126 // ---------VM FLAGS---------------------------------------------------------
18127 //
18128 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18129 //
18130 // Each peephole rule is given an identifying number starting with zero and
18131 // increasing by one in the order seen by the parser.  An individual peephole
18132 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18133 // on the command-line.
18134 //
18135 // ---------CURRENT LIMITATIONS----------------------------------------------
18136 //
18137 // Only match adjacent instructions in same basic block
18138 // Only equality constraints
18139 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18140 // Only one replacement instruction
18141 //
18142 // ---------EXAMPLE----------------------------------------------------------
18143 //
18144 // // pertinent parts of existing instructions in architecture description
18145 // instruct movI(iRegINoSp dst, iRegI src)
18146 // %{
18147 //   match(Set dst (CopyI src));
18148 // %}
18149 //
18150 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18151 // %{
18152 //   match(Set dst (AddI dst src));
18153 //   effect(KILL cr);
18154 // %}
18155 //
18156 // // Change (inc mov) to lea
18157 // peephole %{
//   // increment preceded by register-register move
18159 //   peepmatch ( incI_iReg movI );
18160 //   // require that the destination register of the increment
18161 //   // match the destination register of the move
18162 //   peepconstraint ( 0.dst == 1.dst );
18163 //   // construct a replacement instruction that sets
18164 //   // the destination to ( move's source register + one )
18165 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18166 // %}
18167 //
18168 
18169 // Implementation no longer uses movX instructions since
18170 // machine-independent system no longer uses CopyX nodes.
18171 //
18172 // peephole
18173 // %{
18174 //   peepmatch (incI_iReg movI);
18175 //   peepconstraint (0.dst == 1.dst);
18176 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18177 // %}
18178 
18179 // peephole
18180 // %{
18181 //   peepmatch (decI_iReg movI);
18182 //   peepconstraint (0.dst == 1.dst);
18183 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18184 // %}
18185 
18186 // peephole
18187 // %{
18188 //   peepmatch (addI_iReg_imm movI);
18189 //   peepconstraint (0.dst == 1.dst);
18190 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18191 // %}
18192 
18193 // peephole
18194 // %{
18195 //   peepmatch (incL_iReg movL);
18196 //   peepconstraint (0.dst == 1.dst);
18197 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18198 // %}
18199 
18200 // peephole
18201 // %{
18202 //   peepmatch (decL_iReg movL);
18203 //   peepconstraint (0.dst == 1.dst);
18204 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18205 // %}
18206 
18207 // peephole
18208 // %{
18209 //   peepmatch (addL_iReg_imm movL);
18210 //   peepconstraint (0.dst == 1.dst);
18211 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18212 // %}
18213 
18214 // peephole
18215 // %{
18216 //   peepmatch (addP_iReg_imm movP);
18217 //   peepconstraint (0.dst == 1.dst);
18218 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18219 // %}
18220 
18221 // // Change load of spilled value to only a spill
18222 // instruct storeI(memory mem, iRegI src)
18223 // %{
18224 //   match(Set mem (StoreI mem src));
18225 // %}
18226 //
18227 // instruct loadI(iRegINoSp dst, memory mem)
18228 // %{
18229 //   match(Set dst (LoadI mem));
18230 // %}
18231 //
18232 
18233 //----------SMARTSPILL RULES---------------------------------------------------
18234 // These must follow all instruction definitions as they use the names
18235 // defined in the instructions definitions.
18236 
18237 // Local Variables:
18238 // mode: c++
18239 // End: