1 //
   2 // Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// r0-r7: argument registers, volatile (caller save)
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined: they are kept invisible to
// the register allocator so they can be used as scratch registers
// (see the note above)
// r10-r18: volatile temporaries (caller save)
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        ); // rmethod
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: callee save (SOE) under the C ABI, but caller save (SOC)
// for Java -- no Java callee saves, see the note above
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: system registers, never allocated for Java use (NS)
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can store a vector of single or double precision floating-point
// values: up to 4 * 32 bit floats or 2 * 64 bit doubles.  We currently
// only use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each V register is described by four 32-bit slots: base, _H, _J, _K
  // (see the pairing rules above).
  // v0-v7: FP/SIMD argument registers (SOC)
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15: callee save in the platform ABI, but treated as SOC for
  // Java use (see the note above)
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16-v31: caller save (SOC) per the platform spec
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order (highest priority first); pairs
// must stay on even boundaries, see the note above.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Float/SIMD register allocation order (highest priority first):
// caller-save scratch registers before argument registers before the
// ABI callee-saved range v8-v15.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,                        // heapbase
    R28,                        // thread
    R29,                        // fp
    R30                         // lr
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,                 // heapbase
    R28, R28_H,                 // thread
    R29, R29_H,                 // fp
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
 517 
// Class for all non-special integer registers
// (variant used when the frame pointer R29 is available for allocation)
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Same as above except R29 (fp) is also allocatable.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Picks between the two variants above based on PreserveFramePointer.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers
// (variant used when the frame pointer R29 is not allocatable)
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Same as above except R29 (fp) is also allocatable.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Picks between the two variants above based on PreserveFramePointer.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton classes for individual 64 bit registers (base slot plus
// virtual high half), used where an instruction requires a fixed
// register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (rmethod = r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,                 // heapbase
    R28, R28_H,                 // thread
    R29, R29_H,                 // fp
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);

// Class for all non_special pointer registers
// (as above, minus the reserved heapbase/thread/fp/lr/sp registers)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers
// (only the first 32-bit slot of each V register -- see the note above
// about using just the first element of the vector)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (two 32-bit slots per V register)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers
// (two 32-bit slots = 64 bits per V register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (four 32-bit slots per register: Vn plus the virtual high parts
// Vn_H, Vn_J and Vn_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots are listed (as in double_reg),
// not the _J/_K parts used by vectorx_reg -- confirm this is intended
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only the V1/V1_H slots are listed (as in double_reg),
// not the _J/_K parts used by vectorx_reg -- confirm this is intended
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only the V2/V2_H slots are listed (as in double_reg),
// not the _J/_K parts used by vectorx_reg -- confirm this is intended
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only the V3/V3_H slots are listed (as in double_reg),
// not the _J/_K parts used by vectorx_reg -- confirm this is intended
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the sole flags register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls cost twice a register move
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references cost ten times a register move, reflecting
  // the memory barriers / acquire-release instructions they imply
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 #include "opto/addnode.hpp"
1000 
1001 class CallStubImpl {
1002 
1003   //--------------------------------------------------------------
1004   //---<  Used for optimization in Compile::shorten_branches  >---
1005   //--------------------------------------------------------------
1006 
1007  public:
1008   // Size of call trampoline stub.
1009   static uint size_call_trampoline() {
1010     return 0; // no call trampolines on this platform
1011   }
1012 
1013   // number of relocations needed by a call trampoline stub
1014   static uint reloc_call_trampoline() {
1015     return 0; // no call trampolines on this platform
1016   }
1017 };
1018 
class HandlerImpl {

 public:

  // emitters for the exception and deopt handler stubs (bodies not
  // visible in this chunk)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): this budgets 4 instructions in total, which
    // assumes a far branch can occupy up to 3 instructions -- verify
    // against MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
1035 
  // graph traversal helpers
  //
  // these predicates identify the ideal graph 'signatures' produced
  // by translation of volatile reads/writes and CAS operations (see
  // the long comment at the head of the source block for details)

  // locate the MemBar linked to n by intervening Ctl and Mem
  // ProjNodes, or NULL if there is none
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  // true for a MemBarRelease, or a MemBarCPUOrder whose Ctl and Mem
  // feeds come from a MemBarRelease
  bool leading_membar(const MemBarNode *barrier);

  // true for a MemBarVolatile forming part of a GC card mark sequence
  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // navigation between the membars of one volatile put/CAS signature
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1070 %}
1071 
1072 source %{
1073 
1074   // Optimizaton of volatile gets and puts
1075   // -------------------------------------
1076   //
1077   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1078   // use to implement volatile reads and writes. For a volatile read
1079   // we simply need
1080   //
1081   //   ldar<x>
1082   //
1083   // and for a volatile write we need
1084   //
1085   //   stlr<x>
1086   //
1087   // Alternatively, we can implement them by pairing a normal
1088   // load/store with a memory barrier. For a volatile read we need
1089   //
1090   //   ldr<x>
1091   //   dmb ishld
1092   //
1093   // for a volatile write
1094   //
1095   //   dmb ish
1096   //   str<x>
1097   //   dmb ish
1098   //
1099   // We can also use ldaxr and stlxr to implement compare and swap CAS
1100   // sequences. These are normally translated to an instruction
1101   // sequence like the following
1102   //
1103   //   dmb      ish
1104   // retry:
1105   //   ldxr<x>   rval raddr
1106   //   cmp       rval rold
1107   //   b.ne done
1108   //   stlxr<x>  rval, rnew, rold
1109   //   cbnz      rval retry
1110   // done:
1111   //   cset      r0, eq
1112   //   dmb ishld
1113   //
1114   // Note that the exclusive store is already using an stlxr
1115   // instruction. That is required to ensure visibility to other
1116   // threads of the exclusive write (assuming it succeeds) before that
1117   // of any subsequent writes.
1118   //
1119   // The following instruction sequence is an improvement on the above
1120   //
1121   // retry:
1122   //   ldaxr<x>  rval raddr
1123   //   cmp       rval rold
1124   //   b.ne done
1125   //   stlxr<x>  rval, rnew, rold
1126   //   cbnz      rval retry
1127   // done:
1128   //   cset      r0, eq
1129   //
1130   // We don't need the leading dmb ish since the stlxr guarantees
1131   // visibility of prior writes in the case that the swap is
1132   // successful. Crucially we don't have to worry about the case where
1133   // the swap is not successful since no valid program should be
1134   // relying on visibility of prior changes by the attempting thread
1135   // in the case where the CAS fails.
1136   //
1137   // Similarly, we don't need the trailing dmb ishld if we substitute
1138   // an ldaxr instruction since that will provide all the guarantees we
1139   // require regarding observation of changes made by other threads
1140   // before any change to the CAS address observed by the load.
1141   //
1142   // In order to generate the desired instruction sequence we need to
1143   // be able to identify specific 'signature' ideal graph node
1144   // sequences which i) occur as a translation of a volatile reads or
1145   // writes or CAS operations and ii) do not occur through any other
1146   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1148   // sequences to the desired machine code sequences. Selection of the
1149   // alternative rules can be implemented by predicates which identify
1150   // the relevant node sequences.
1151   //
1152   // The ideal graph generator translates a volatile read to the node
1153   // sequence
1154   //
1155   //   LoadX[mo_acquire]
1156   //   MemBarAcquire
1157   //
1158   // As a special case when using the compressed oops optimization we
1159   // may also see this variant
1160   //
1161   //   LoadN[mo_acquire]
1162   //   DecodeN
1163   //   MemBarAcquire
1164   //
1165   // A volatile write is translated to the node sequence
1166   //
1167   //   MemBarRelease
1168   //   StoreX[mo_release] {CardMark}-optional
1169   //   MemBarVolatile
1170   //
1171   // n.b. the above node patterns are generated with a strict
1172   // 'signature' configuration of input and output dependencies (see
1173   // the predicates below for exact details). The card mark may be as
1174   // simple as a few extra nodes or, in a few GC configurations, may
1175   // include more complex control flow between the leading and
1176   // trailing memory barriers. However, whatever the card mark
1177   // configuration these signatures are unique to translated volatile
1178   // reads/stores -- they will not appear as a result of any other
1179   // bytecode translation or inlining nor as a consequence of
1180   // optimizing transforms.
1181   //
1182   // We also want to catch inlined unsafe volatile gets and puts and
1183   // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/str<x> and dmb instructions.
1185   //
1186   // Inlined unsafe volatiles puts manifest as a minor variant of the
1187   // normal volatile put node sequence containing an extra cpuorder
1188   // membar
1189   //
1190   //   MemBarRelease
1191   //   MemBarCPUOrder
1192   //   StoreX[mo_release] {CardMark}-optional
1193   //   MemBarVolatile
1194   //
1195   // n.b. as an aside, the cpuorder membar is not itself subject to
1196   // matching and translation by adlc rules.  However, the rule
1197   // predicates need to detect its presence in order to correctly
1198   // select the desired adlc rules.
1199   //
1200   // Inlined unsafe volatile gets manifest as a somewhat different
1201   // node sequence to a normal volatile get
1202   //
1203   //   MemBarCPUOrder
1204   //        ||       \\
1205   //   MemBarAcquire LoadX[mo_acquire]
1206   //        ||
1207   //   MemBarCPUOrder
1208   //
1209   // In this case the acquire membar does not directly depend on the
1210   // load. However, we can be sure that the load is generated from an
1211   // inlined unsafe volatile get if we see it dependent on this unique
1212   // sequence of membar nodes. Similarly, given an acquire membar we
1213   // can know that it was added because of an inlined unsafe volatile
1214   // get if it is fed and feeds a cpuorder membar and if its feed
1215   // membar also feeds an acquiring load.
1216   //
1217   // Finally an inlined (Unsafe) CAS operation is translated to the
1218   // following ideal graph
1219   //
1220   //   MemBarRelease
1221   //   MemBarCPUOrder
1222   //   CompareAndSwapX {CardMark}-optional
1223   //   MemBarCPUOrder
1224   //   MemBarAcquire
1225   //
1226   // So, where we can identify these volatile read and write
1227   // signatures we can choose to plant either of the above two code
1228   // sequences. For a volatile read we can simply plant a normal
1229   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1230   // also choose to inhibit translation of the MemBarAcquire and
1231   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1232   //
1233   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1235   // normal str<x> and then a dmb ish for the MemBarVolatile.
1236   // Alternatively, we can inhibit translation of the MemBarRelease
1237   // and MemBarVolatile and instead plant a simple stlr<x>
1238   // instruction.
1239   //
1240   // when we recognise a CAS signature we can choose to plant a dmb
1241   // ish as a translation for the MemBarRelease, the conventional
1242   // macro-instruction sequence for the CompareAndSwap node (which
1243   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1244   // Alternatively, we can elide generation of the dmb instructions
1245   // and plant the alternative CompareAndSwap macro-instruction
1246   // sequence (which uses ldaxr<x>).
1247   //
1248   // Of course, the above only applies when we see these signature
1249   // configurations. We still want to plant dmb instructions in any
1250   // other cases where we may see a MemBarAcquire, MemBarRelease or
1251   // MemBarVolatile. For example, at the end of a constructor which
1252   // writes final/volatile fields we will see a MemBarRelease
1253   // instruction and this needs a 'dmb ish' lest we risk the
1254   // constructed object being visible without making the
1255   // final/volatile field writes visible.
1256   //
1257   // n.b. the translation rules below which rely on detection of the
1258   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1259   // If we see anything other than the signature configurations we
1260   // always just translate the loads and stores to ldr<x> and str<x>
1261   // and translate acquire, release and volatile membars to the
1262   // relevant dmb instructions.
1263   //
1264 
1265   // graph traversal helpers used for volatile put/get and CAS
1266   // optimization
1267 
1268   // 1) general purpose helpers
1269 
1270   // if node n is linked to a parent MemBarNode by an intervening
1271   // Control and Memory ProjNode return the MemBarNode otherwise return
1272   // NULL.
1273   //
1274   // n may only be a Load or a MemBar.
1275 
1276   MemBarNode *parent_membar(const Node *n)
1277   {
1278     Node *ctl = NULL;
1279     Node *mem = NULL;
1280     Node *membar = NULL;
1281 
1282     if (n->is_Load()) {
1283       ctl = n->lookup(LoadNode::Control);
1284       mem = n->lookup(LoadNode::Memory);
1285     } else if (n->is_MemBar()) {
1286       ctl = n->lookup(TypeFunc::Control);
1287       mem = n->lookup(TypeFunc::Memory);
1288     } else {
1289         return NULL;
1290     }
1291 
1292     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1293       return NULL;
1294     }
1295 
1296     membar = ctl->lookup(0);
1297 
1298     if (!membar || !membar->is_MemBar()) {
1299       return NULL;
1300     }
1301 
1302     if (mem->lookup(0) != membar) {
1303       return NULL;
1304     }
1305 
1306     return membar->as_MemBar();
1307   }
1308 
1309   // if n is linked to a child MemBarNode by intervening Control and
1310   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1311 
1312   MemBarNode *child_membar(const MemBarNode *n)
1313   {
1314     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1315     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1316 
1317     // MemBar needs to have both a Ctl and Mem projection
1318     if (! ctl || ! mem)
1319       return NULL;
1320 
1321     MemBarNode *child = NULL;
1322     Node *x;
1323 
1324     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1325       x = ctl->fast_out(i);
1326       // if we see a membar we keep hold of it. we may also see a new
1327       // arena copy of the original but it will appear later
1328       if (x->is_MemBar()) {
1329           child = x->as_MemBar();
1330           break;
1331       }
1332     }
1333 
1334     if (child == NULL) {
1335       return NULL;
1336     }
1337 
1338     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1339       x = mem->fast_out(i);
1340       // if we see a membar we keep hold of it. we may also see a new
1341       // arena copy of the original but it will appear later
1342       if (x == child) {
1343         return child;
1344       }
1345     }
1346     return NULL;
1347   }
1348 
1349   // helper predicate use to filter candidates for a leading memory
1350   // barrier
1351   //
1352   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1353   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1354 
1355   bool leading_membar(const MemBarNode *barrier)
1356   {
1357     int opcode = barrier->Opcode();
1358     // if this is a release membar we are ok
1359     if (opcode == Op_MemBarRelease) {
1360       return true;
1361     }
1362     // if its a cpuorder membar . . .
1363     if (opcode != Op_MemBarCPUOrder) {
1364       return false;
1365     }
1366     // then the parent has to be a release membar
1367     MemBarNode *parent = parent_membar(barrier);
1368     if (!parent) {
1369       return false;
1370     }
1371     opcode = parent->Opcode();
1372     return opcode == Op_MemBarRelease;
1373   }
1374 
1375   // 2) card mark detection helper
1376 
1377   // helper predicate which can be used to detect a volatile membar
1378   // introduced as part of a conditional card mark sequence either by
1379   // G1 or by CMS when UseCondCardMark is true.
1380   //
1381   // membar can be definitively determined to be part of a card mark
1382   // sequence if and only if all the following hold
1383   //
1384   // i) it is a MemBarVolatile
1385   //
1386   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1387   // true
1388   //
1389   // iii) the node's Mem projection feeds a StoreCM node.
1390 
1391   bool is_card_mark_membar(const MemBarNode *barrier)
1392   {
1393     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1394       return false;
1395     }
1396 
1397     if (barrier->Opcode() != Op_MemBarVolatile) {
1398       return false;
1399     }
1400 
1401     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1402 
1403     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1404       Node *y = mem->fast_out(i);
1405       if (y->Opcode() == Op_StoreCM) {
1406         return true;
1407       }
1408     }
1409 
1410     return false;
1411   }
1412 
1413 
1414   // 3) helper predicates to traverse volatile put or CAS graphs which
1415   // may contain GC barrier subgraphs
1416 
1417   // Preamble
1418   // --------
1419   //
1420   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1422   // leading MemBarRelease and a trailing MemBarVolatile as follows
1423   //
1424   //   MemBarRelease
1425   //  {      ||      } -- optional
1426   //  {MemBarCPUOrder}
1427   //         ||     \\
1428   //         ||     StoreX[mo_release]
1429   //         | \     /
1430   //         | MergeMem
1431   //         | /
1432   //   MemBarVolatile
1433   //
1434   // where
1435   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1436   //  | \ and / indicate further routing of the Ctl and Mem feeds
1437   //
1438   // this is the graph we see for non-object stores. however, for a
1439   // volatile Object store (StoreN/P) we may see other nodes below the
1440   // leading membar because of the need for a GC pre- or post-write
1441   // barrier.
1442   //
  // with most GC configurations we will see this simple variant which
1444   // includes a post-write barrier card mark.
1445   //
1446   //   MemBarRelease______________________________
1447   //         ||    \\               Ctl \        \\
1448   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1449   //         | \     /                       . . .  /
1450   //         | MergeMem
1451   //         | /
1452   //         ||      /
1453   //   MemBarVolatile
1454   //
1455   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1456   // the object address to an int used to compute the card offset) and
1457   // Ctl+Mem to a StoreB node (which does the actual card mark).
1458   //
1459   // n.b. a StoreCM node will only appear in this configuration when
1460   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1461   // because it implies a requirement to order visibility of the card
1462   // mark (StoreCM) relative to the object put (StoreP/N) using a
1463   // StoreStore memory barrier (arguably this ought to be represented
1464   // explicitly in the ideal graph but that is not how it works). This
1465   // ordering is required for both non-volatile and volatile
1466   // puts. Normally that means we need to translate a StoreCM using
1467   // the sequence
1468   //
1469   //   dmb ishst
1470   //   stlrb
1471   //
1472   // However, in the case of a volatile put if we can recognise this
1473   // configuration and plant an stlr for the object write then we can
1474   // omit the dmb and just plant an strb since visibility of the stlr
1475   // is ordered before visibility of subsequent stores. StoreCM nodes
1476   // also arise when using G1 or using CMS with conditional card
1477   // marking. In these cases (as we shall see) we don't need to insert
1478   // the dmb when translating StoreCM because there is already an
1479   // intervening StoreLoad barrier between it and the StoreP/N.
1480   //
1481   // It is also possible to perform the card mark conditionally on it
1482   // currently being unmarked in which case the volatile put graph
1483   // will look slightly different
1484   //
1485   //   MemBarRelease____________________________________________
1486   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1487   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1488   //         | \     /                              \            |
1489   //         | MergeMem                            . . .      StoreB
1490   //         | /                                                /
1491   //         ||     /
1492   //   MemBarVolatile
1493   //
1494   // It is worth noting at this stage that both the above
1495   // configurations can be uniquely identified by checking that the
1496   // memory flow includes the following subgraph:
1497   //
1498   //   MemBarRelease
1499   //  {MemBarCPUOrder}
1500   //          |  \      . . .
1501   //          |  StoreX[mo_release]  . . .
1502   //          |   /
1503   //         MergeMem
1504   //          |
1505   //   MemBarVolatile
1506   //
1507   // This is referred to as a *normal* subgraph. It can easily be
1508   // detected starting from any candidate MemBarRelease,
1509   // StoreX[mo_release] or MemBarVolatile.
1510   //
1511   // A simple variation on this normal case occurs for an unsafe CAS
1512   // operation. The basic graph for a non-object CAS is
1513   //
1514   //   MemBarRelease
1515   //         ||
1516   //   MemBarCPUOrder
1517   //         ||     \\   . . .
1518   //         ||     CompareAndSwapX
1519   //         ||       |
1520   //         ||     SCMemProj
1521   //         | \     /
1522   //         | MergeMem
1523   //         | /
1524   //   MemBarCPUOrder
1525   //         ||
1526   //   MemBarAcquire
1527   //
1528   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1531   // tail of the graph is a pair comprising a MemBarCPUOrder +
1532   // MemBarAcquire.
1533   //
1534   // So, in the case of a CAS the normal graph has the variant form
1535   //
1536   //   MemBarRelease
1537   //   MemBarCPUOrder
1538   //          |   \      . . .
1539   //          |  CompareAndSwapX  . . .
1540   //          |    |
1541   //          |   SCMemProj
1542   //          |   /  . . .
1543   //         MergeMem
1544   //          |
1545   //   MemBarCPUOrder
1546   //   MemBarAcquire
1547   //
1548   // This graph can also easily be detected starting from any
1549   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1550   //
1551   // the code below uses two helper predicates, leading_to_normal and
1552   // normal_to_leading to identify these normal graphs, one validating
1553   // the layout starting from the top membar and searching down and
1554   // the other validating the layout starting from the lower membar
1555   // and searching up.
1556   //
1557   // There are two special case GC configurations when a normal graph
1558   // may not be generated: when using G1 (which always employs a
1559   // conditional card mark); and when using CMS with conditional card
1560   // marking configured. These GCs are both concurrent rather than
1561   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1562   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1564   // object put and the corresponding conditional card mark. CMS
1565   // employs a post-write GC barrier while G1 employs both a pre- and
1566   // post-write GC barrier. Of course the extra nodes may be absent --
1567   // they are only inserted for object puts. This significantly
1568   // complicates the task of identifying whether a MemBarRelease,
1569   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1570   // when using these GC configurations (see below). It adds similar
1571   // complexity to the task of identifying whether a MemBarRelease,
1572   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1573   //
1574   // In both cases the post-write subtree includes an auxiliary
1575   // MemBarVolatile (StoreLoad barrier) separating the object put and
1576   // the read of the corresponding card. This poses two additional
1577   // problems.
1578   //
1579   // Firstly, a card mark MemBarVolatile needs to be distinguished
1580   // from a normal trailing MemBarVolatile. Resolving this first
1581   // problem is straightforward: a card mark MemBarVolatile always
1582   // projects a Mem feed to a StoreCM node and that is a unique marker
1583   //
1584   //      MemBarVolatile (card mark)
1585   //       C |    \     . . .
1586   //         |   StoreCM   . . .
1587   //       . . .
1588   //
1589   // The second problem is how the code generator is to translate the
1590   // card mark barrier? It always needs to be translated to a "dmb
1591   // ish" instruction whether or not it occurs as part of a volatile
1592   // put. A StoreLoad barrier is needed after the object put to ensure
1593   // i) visibility to GC threads of the object put and ii) visibility
1594   // to the mutator thread of any card clearing write by a GC
1595   // thread. Clearly a normal store (str) will not guarantee this
1596   // ordering but neither will a releasing store (stlr). The latter
1597   // guarantees that the object put is visible but does not guarantee
1598   // that writes by other threads have also been observed.
1599   //
1600   // So, returning to the task of translating the object put and the
1601   // leading/trailing membar nodes: what do the non-normal node graph
1602   // look like for these 2 special cases? and how can we determine the
1603   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1604   // in both normal and non-normal cases?
1605   //
1606   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1608   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1609   // intervening StoreLoad barrier (MemBarVolatile).
1610   //
1611   // So, with CMS we may see a node graph for a volatile object store
1612   // which looks like this
1613   //
1614   //   MemBarRelease
1615   //   MemBarCPUOrder_(leading)__________________
1616   //     C |    M \       \\                   C \
1617   //       |       \    StoreN/P[mo_release]  CastP2X
1618   //       |    Bot \    /
1619   //       |       MergeMem
1620   //       |         /
1621   //      MemBarVolatile (card mark)
1622   //     C |  ||    M |
1623   //       | LoadB    |
1624   //       |   |      |
1625   //       | Cmp      |\
1626   //       | /        | \
1627   //       If         |  \
1628   //       | \        |   \
1629   // IfFalse  IfTrue  |    \
1630   //       \     / \  |     \
1631   //        \   / StoreCM    |
1632   //         \ /      |      |
1633   //        Region   . . .   |
1634   //          | \           /
1635   //          |  . . .  \  / Bot
1636   //          |       MergeMem
1637   //          |          |
1638   //        MemBarVolatile (trailing)
1639   //
1640   // The first MergeMem merges the AliasIdxBot Mem slice from the
1641   // leading membar and the oopptr Mem slice from the Store into the
1642   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1643   // Mem slice from the card mark membar and the AliasIdxRaw slice
1644   // from the StoreCM into the trailing membar (n.b. the latter
1645   // proceeds via a Phi associated with the If region).
1646   //
1647   // The graph for a CAS varies slightly, the obvious difference being
1648   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1649   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1650   // MemBarAcquire pair. The other important difference is that the
1651   // CompareAndSwap node's SCMemProj is not merged into the card mark
1652   // membar - it still feeds the trailing MergeMem. This also means
1653   // that the card mark membar receives its Mem feed directly from the
1654   // leading membar rather than via a MergeMem.
1655   //
1656   //   MemBarRelease
1657   //   MemBarCPUOrder__(leading)_________________________
1658   //       ||                       \\                 C \
1659   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1660   //     C |  ||    M |              |
1661   //       | LoadB    |       ______/|
1662   //       |   |      |      /       |
1663   //       | Cmp      |     /      SCMemProj
1664   //       | /        |    /         |
1665   //       If         |   /         /
1666   //       | \        |  /         /
1667   // IfFalse  IfTrue  | /         /
1668   //       \     / \  |/ prec    /
1669   //        \   / StoreCM       /
1670   //         \ /      |        /
1671   //        Region   . . .    /
1672   //          | \            /
1673   //          |  . . .  \   / Bot
1674   //          |       MergeMem
1675   //          |          |
1676   //        MemBarCPUOrder
1677   //        MemBarAcquire (trailing)
1678   //
1679   // This has a slightly different memory subgraph to the one seen
1680   // previously but the core of it is the same as for the CAS normal
  // subgraph
1682   //
1683   //   MemBarRelease
1684   //   MemBarCPUOrder____
1685   //      ||             \      . . .
1686   //   MemBarVolatile  CompareAndSwapX  . . .
1687   //      |  \            |
1688   //        . . .   SCMemProj
1689   //          |     /  . . .
1690   //         MergeMem
1691   //          |
1692   //   MemBarCPUOrder
1693   //   MemBarAcquire
1694   //
1695   //
1696   // G1 is quite a lot more complicated. The nodes inserted on behalf
1697   // of G1 may comprise: a pre-write graph which adds the old value to
1698   // the SATB queue; the releasing store itself; and, finally, a
1699   // post-write graph which performs a card mark.
1700   //
1701   // The pre-write graph may be omitted, but only when the put is
1702   // writing to a newly allocated (young gen) object and then only if
1703   // there is a direct memory chain to the Initialize node for the
1704   // object allocation. This will not happen for a volatile put since
1705   // any memory chain passes through the leading membar.
1706   //
1707   // The pre-write graph includes a series of 3 If tests. The outermost
1708   // If tests whether SATB is enabled (no else case). The next If tests
1709   // whether the old value is non-NULL (no else case). The third tests
1710   // whether the SATB queue index is > 0, if so updating the queue. The
1711   // else case for this third If calls out to the runtime to allocate a
1712   // new queue buffer.
1713   //
1714   // So with G1 the pre-write and releasing store subgraph looks like
1715   // this (the nested Ifs are omitted).
1716   //
1717   //  MemBarRelease (leading)____________
1718   //     C |  ||  M \   M \    M \  M \ . . .
1719   //       | LoadB   \  LoadL  LoadN   \
1720   //       | /        \                 \
1721   //       If         |\                 \
1722   //       | \        | \                 \
1723   //  IfFalse  IfTrue |  \                 \
1724   //       |     |    |   \                 |
1725   //       |     If   |   /\                |
1726   //       |     |          \               |
1727   //       |                 \              |
1728   //       |    . . .         \             |
1729   //       | /       | /       |            |
1730   //      Region  Phi[M]       |            |
1731   //       | \       |         |            |
1732   //       |  \_____ | ___     |            |
1733   //     C | C \     |   C \ M |            |
1734   //       | CastP2X | StoreN/P[mo_release] |
1735   //       |         |         |            |
1736   //     C |       M |       M |          M |
1737   //        \        |         |           /
1738   //                  . . .
1739   //          (post write subtree elided)
1740   //                    . . .
1741   //             C \         M /
1742   //         MemBarVolatile (trailing)
1743   //
1744   // n.b. the LoadB in this subgraph is not the card read -- it's a
1745   // read of the SATB queue active flag.
1746   //
1747   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1749   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1750   //
1751   // The G1 post-write subtree is also optional, this time when the
1752   // new value being written is either null or can be identified as a
1753   // newly allocated (young gen) object with no intervening control
1754   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1757   // trailing membar as per the normal subgraph. So, the only special
1758   // case which arises is when the post-write subgraph is generated.
1759   //
1760   // The kernel of the post-write G1 subgraph is the card mark itself
1761   // which includes a card mark memory barrier (MemBarVolatile), a
1762   // card test (LoadB), and a conditional update (If feeding a
1763   // StoreCM). These nodes are surrounded by a series of nested Ifs
1764   // which try to avoid doing the card mark. The top level If skips if
1765   // the object reference does not cross regions (i.e. it tests if
1766   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1767   // need not be recorded. The next If, which skips on a NULL value,
1768   // may be absent (it is not generated if the type of value is >=
1769   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1770   // checking if card_val != young).  n.b. although this test requires
1771   // a pre-read of the card it can safely be done before the StoreLoad
1772   // barrier. However that does not bypass the need to reread the card
1773   // after the barrier.
1774   //
1775   //                (pre-write subtree elided)
1776   //        . . .                  . . .    . . .  . . .
1777   //        C |                    M |     M |    M |
1778   //       Region                  Phi[M] StoreN    |
1779   //          |                     / \      |      |
1780   //         / \_______            /   \     |      |
1781   //      C / C \      . . .            \    |      |
1782   //       If   CastP2X . . .            |   |      |
1783   //       / \                           |   |      |
1784   //      /   \                          |   |      |
1785   // IfFalse IfTrue                      |   |      |
1786   //   |       |                         |   |     /|
1787   //   |       If                        |   |    / |
1788   //   |      / \                        |   |   /  |
1789   //   |     /   \                        \  |  /   |
1790   //   | IfFalse IfTrue                   MergeMem  |
1791   //   |  . . .    / \                       /      |
1792   //   |          /   \                     /       |
1793   //   |     IfFalse IfTrue                /        |
1794   //   |      . . .    |                  /         |
1795   //   |               If                /          |
1796   //   |               / \              /           |
1797   //   |              /   \            /            |
1798   //   |         IfFalse IfTrue       /             |
1799   //   |           . . .   |         /              |
1800   //   |                    \       /               |
1801   //   |                     \     /                |
1802   //   |             MemBarVolatile__(card mark)    |
1803   //   |                ||   C |  M \  M \          |
1804   //   |               LoadB   If    |    |         |
1805   //   |                      / \    |    |         |
1806   //   |                     . . .   |    |         |
1807   //   |                          \  |    |        /
1808   //   |                        StoreCM   |       /
1809   //   |                          . . .   |      /
1810   //   |                        _________/      /
1811   //   |                       /  _____________/
1812   //   |   . . .       . . .  |  /            /
1813   //   |    |                 | /   _________/
1814   //   |    |               Phi[M] /        /
1815   //   |    |                 |   /        /
1816   //   |    |                 |  /        /
1817   //   |  Region  . . .     Phi[M]  _____/
1818   //   |    /                 |    /
1819   //   |                      |   /
1820   //   | . . .   . . .        |  /
1821   //   | /                    | /
1822   // Region           |  |  Phi[M]
1823   //   |              |  |  / Bot
1824   //    \            MergeMem
1825   //     \            /
1826   //     MemBarVolatile
1827   //
1828   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1829   // from the leading membar and the oopptr Mem slice from the Store
1830   // into the card mark membar i.e. the memory flow to the card mark
1831   // membar still looks like a normal graph.
1832   //
1833   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1834   // Mem slices (from the StoreCM and other card mark queue stores).
1835   // However in this case the AliasIdxBot Mem slice does not come
1836   // direct from the card mark membar. It is merged through a series
1837   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1838   // from the leading membar with the Mem feed from the card mark
1839   // membar. Each Phi corresponds to one of the Ifs which may skip
1840   // around the card mark membar. So when the If implementing the NULL
1841   // value check has been elided the total number of Phis is 2
1842   // otherwise it is 3.
1843   //
  // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1848   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1849   // Mem feed from the CompareAndSwapP/N includes a precedence
1850   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1851   // trailing membar. So, as before the configuration includes the
1852   // normal CAS graph as a subgraph of the memory flow.
1853   //
1854   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1856   // its child membar, either a volatile put graph (including a
1857   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1858   // When that child is not a card mark membar then it marks the end
1859   // of the volatile put or CAS subgraph. If the child is a card mark
1860   // membar then the normal subgraph will form part of a volatile put
1861   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1862   // to a trailing barrier via a MergeMem. That feed is either direct
1863   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1864   // memory flow (for G1).
1865   //
1866   // The predicates controlling generation of instructions for store
1867   // and barrier nodes employ a few simple helper functions (described
1868   // below) which identify the presence or absence of all these
1869   // subgraph configurations and provide a means of traversing from
1870   // one node in the subgraph to another.
1871 
1872   // is_CAS(int opcode)
1873   //
1874   // return true if opcode is one of the possible CompareAndSwapX
1875   // values otherwise false.
1876 
1877   bool is_CAS(int opcode)
1878   {
1879     switch(opcode) {
1880       // We handle these
1881     case Op_CompareAndSwapI:
1882     case Op_CompareAndSwapL:
1883     case Op_CompareAndSwapP:
1884     case Op_CompareAndSwapN:
1885  // case Op_CompareAndSwapB:
1886  // case Op_CompareAndSwapS:
1887       return true;
1888       // These are TBD
1889     case Op_WeakCompareAndSwapB:
1890     case Op_WeakCompareAndSwapS:
1891     case Op_WeakCompareAndSwapI:
1892     case Op_WeakCompareAndSwapL:
1893     case Op_WeakCompareAndSwapP:
1894     case Op_WeakCompareAndSwapN:
1895     case Op_CompareAndExchangeB:
1896     case Op_CompareAndExchangeS:
1897     case Op_CompareAndExchangeI:
1898     case Op_CompareAndExchangeL:
1899     case Op_CompareAndExchangeP:
1900     case Op_CompareAndExchangeN:
1901       return false;
1902     default:
1903       return false;
1904     }
1905   }
1906 
1907 
1908   // leading_to_normal
1909   //
  // graph traversal helper which detects the normal case Mem feed from
1911   // a release membar (or, optionally, its cpuorder child) to a
1912   // dependent volatile membar i.e. it ensures that one or other of
1913   // the following Mem flow subgraph is present.
1914   //
1915   //   MemBarRelease
1916   //   MemBarCPUOrder {leading}
1917   //          |  \      . . .
1918   //          |  StoreN/P[mo_release]  . . .
1919   //          |   /
1920   //         MergeMem
1921   //          |
1922   //   MemBarVolatile {trailing or card mark}
1923   //
1924   //   MemBarRelease
1925   //   MemBarCPUOrder {leading}
1926   //      |       \      . . .
1927   //      |     CompareAndSwapX  . . .
1928   //               |
1929   //     . . .    SCMemProj
1930   //           \   |
1931   //      |    MergeMem
1932   //      |       /
1933   //    MemBarCPUOrder
1934   //    MemBarAcquire {trailing}
1935   //
1936   // if the correct configuration is present returns the trailing
1937   // membar otherwise NULL.
1938   //
1939   // the input membar is expected to be either a cpuorder membar or a
1940   // release membar. in the latter case it should not have a cpu membar
1941   // child.
1942   //
1943   // the returned value may be a card mark or trailing membar
1944   //
1945 
  MemBarNode *leading_to_normal(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;

    // walk the users of the leading membar's Mem projection looking
    // for exactly one MergeMem and at most one of either a releasing
    // store or a CAS node -- any duplicate means this is not the
    // normal configuration and we bail out
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          return NULL;
        }
        // two merge mems is one too many
        mm = x->as_MergeMem();
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // n.b. StoreCM (card mark store) is deliberately excluded --
        // only the releasing StoreN/P itself is of interest here
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have a merge if we also have st
    // (in the CAS case the merge is fed via the SCMemProj instead and
    // is located below)
    if (st && !mm) {
      return NULL;
    }

    Node *y = NULL;
    if (cas) {
      // look for an SCMemProj hanging off the CAS -- it carries the
      // memory state produced by the CAS
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL)
        return NULL;
    } else {
      // ensure the store feeds the existing mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
    }

    MemBarNode *mbar = NULL;
    // ensure the merge feeds to the expected type of membar: a
    // MemBarVolatile for the store case, or a MemBarCPUOrder whose
    // child is a MemBarAcquire for the CAS case (in which case the
    // acquire is the membar we return)
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        int opcode = x->Opcode();
        if (opcode == Op_MemBarVolatile && st) {
          mbar = x->as_MemBar();
        } else if (cas && opcode == Op_MemBarCPUOrder) {
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
            mbar = y;
          }
        }
        // only the first membar user is considered
        break;
      }
    }

    // NULL when no valid configuration was found; otherwise the
    // trailing (or card mark) membar
    return mbar;
  }
2053 
2054   // normal_to_leading
2055   //
2056   // graph traversal helper which detects the normal case Mem feed
2057   // from either a card mark or a trailing membar to a preceding
2058   // release membar (optionally its cpuorder child) i.e. it ensures
2059   // that one or other of the following Mem flow subgraphs is present.
2060   //
2061   //   MemBarRelease
2062   //   MemBarCPUOrder {leading}
2063   //          |  \      . . .
2064   //          |  StoreN/P[mo_release]  . . .
2065   //          |   /
2066   //         MergeMem
2067   //          |
2068   //   MemBarVolatile {card mark or trailing}
2069   //
2070   //   MemBarRelease
2071   //   MemBarCPUOrder {leading}
2072   //      |       \      . . .
2073   //      |     CompareAndSwapX  . . .
2074   //               |
2075   //     . . .    SCMemProj
2076   //           \   |
2077   //      |    MergeMem
2078   //      |        /
2079   //    MemBarCPUOrder
2080   //    MemBarAcquire {trailing}
2081   //
2082   // this predicate checks for the same flow as the previous predicate
2083   // but starting from the bottom rather than the top.
2084   //
  // if the configuration is present returns the cpuorder membar for
2086   // preference or when absent the release membar otherwise NULL.
2087   //
2088   // n.b. the input membar is expected to be a MemBarVolatile but
2089   // need not be a card mark membar.
2090 
2091   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2092   {
2093     // input must be a volatile membar
2094     assert((barrier->Opcode() == Op_MemBarVolatile ||
2095             barrier->Opcode() == Op_MemBarAcquire),
2096            "expecting a volatile or an acquire membar");
2097     Node *x;
2098     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2099 
2100     // if we have an acquire membar then it must be fed via a CPUOrder
2101     // membar
2102 
2103     if (is_cas) {
2104       // skip to parent barrier which must be a cpuorder
2105       x = parent_membar(barrier);
2106       if (x->Opcode() != Op_MemBarCPUOrder)
2107         return NULL;
2108     } else {
2109       // start from the supplied barrier
2110       x = (Node *)barrier;
2111     }
2112 
2113     // the Mem feed to the membar should be a merge
2114     x = x ->in(TypeFunc::Memory);
2115     if (!x->is_MergeMem())
2116       return NULL;
2117 
2118     MergeMemNode *mm = x->as_MergeMem();
2119 
2120     if (is_cas) {
2121       // the merge should be fed from the CAS via an SCMemProj node
2122       x = NULL;
2123       for (uint idx = 1; idx < mm->req(); idx++) {
2124         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2125           x = mm->in(idx);
2126           break;
2127         }
2128       }
2129       if (x == NULL) {
2130         return NULL;
2131       }
2132       // check for a CAS feeding this proj
2133       x = x->in(0);
2134       int opcode = x->Opcode();
2135       if (!is_CAS(opcode)) {
2136         return NULL;
2137       }
2138       // the CAS should get its mem feed from the leading membar
2139       x = x->in(MemNode::Memory);
2140     } else {
2141       // the merge should get its Bottom mem feed from the leading membar
2142       x = mm->in(Compile::AliasIdxBot);
2143     }
2144 
2145     // ensure this is a non control projection
2146     if (!x->is_Proj() || x->is_CFG()) {
2147       return NULL;
2148     }
2149     // if it is fed by a membar that's the one we want
2150     x = x->in(0);
2151 
2152     if (!x->is_MemBar()) {
2153       return NULL;
2154     }
2155 
2156     MemBarNode *leading = x->as_MemBar();
2157     // reject invalid candidates
2158     if (!leading_membar(leading)) {
2159       return NULL;
2160     }
2161 
2162     // ok, we have a leading membar, now for the sanity clauses
2163 
2164     // the leading membar must feed Mem to a releasing store or CAS
2165     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2166     StoreNode *st = NULL;
2167     LoadStoreNode *cas = NULL;
2168     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2169       x = mem->fast_out(i);
2170       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2171         // two stores or CASes is one too many
2172         if (st != NULL || cas != NULL) {
2173           return NULL;
2174         }
2175         st = x->as_Store();
2176       } else if (is_CAS(x->Opcode())) {
2177         if (st != NULL || cas != NULL) {
2178           return NULL;
2179         }
2180         cas = x->as_LoadStore();
2181       }
2182     }
2183 
2184     // we should not have both a store and a cas
2185     if (st == NULL & cas == NULL) {
2186       return NULL;
2187     }
2188 
2189     if (st == NULL) {
2190       // nothing more to check
2191       return leading;
2192     } else {
2193       // we should not have a store if we started from an acquire
2194       if (is_cas) {
2195         return NULL;
2196       }
2197 
2198       // the store should feed the merge we used to get here
2199       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2200         if (st->fast_out(i) == mm) {
2201           return leading;
2202         }
2203       }
2204     }
2205 
2206     return NULL;
2207   }
2208 
2209   // card_mark_to_trailing
2210   //
2211   // graph traversal helper which detects extra, non-normal Mem feed
2212   // from a card mark volatile membar to a trailing membar i.e. it
2213   // ensures that one of the following three GC post-write Mem flow
2214   // subgraphs is present.
2215   //
2216   // 1)
2217   //     . . .
2218   //       |
2219   //   MemBarVolatile (card mark)
2220   //      |          |
2221   //      |        StoreCM
2222   //      |          |
2223   //      |        . . .
2224   //  Bot |  /
2225   //   MergeMem
2226   //      |
2227   //      |
2228   //    MemBarVolatile {trailing}
2229   //
2230   // 2)
2231   //   MemBarRelease/CPUOrder (leading)
2232   //    |
2233   //    |
2234   //    |\       . . .
2235   //    | \        |
2236   //    |  \  MemBarVolatile (card mark)
2237   //    |   \   |     |
2238   //     \   \  |   StoreCM    . . .
2239   //      \   \ |
2240   //       \  Phi
2241   //        \ /
2242   //        Phi  . . .
2243   //     Bot |   /
2244   //       MergeMem
2245   //         |
2246   //    MemBarVolatile {trailing}
2247   //
2248   //
2249   // 3)
2250   //   MemBarRelease/CPUOrder (leading)
2251   //    |
2252   //    |\
2253   //    | \
2254   //    |  \      . . .
2255   //    |   \       |
2256   //    |\   \  MemBarVolatile (card mark)
2257   //    | \   \   |     |
2258   //    |  \   \  |   StoreCM    . . .
2259   //    |   \   \ |
2260   //     \   \  Phi
2261   //      \   \ /
2262   //       \  Phi
2263   //        \ /
2264   //        Phi  . . .
2265   //     Bot |   /
2266   //       MergeMem
2267   //         |
2268   //         |
2269   //    MemBarVolatile {trailing}
2270   //
2271   // configuration 1 is only valid if UseConcMarkSweepGC &&
2272   // UseCondCardMark
2273   //
2274   // configurations 2 and 3 are only valid if UseG1GC.
2275   //
2276   // if a valid configuration is present returns the trailing membar
2277   // otherwise NULL.
2278   //
2279   // n.b. the supplied membar is expected to be a card mark
2280   // MemBarVolatile i.e. the caller must ensure the input node has the
2281   // correct operand and feeds Mem to a StoreCM node
2282 
2283   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2284   {
2285     // input must be a card mark volatile membar
2286     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2287 
2288     Node *feed = barrier->proj_out(TypeFunc::Memory);
2289     Node *x;
2290     MergeMemNode *mm = NULL;
2291 
2292     const int MAX_PHIS = 3;     // max phis we will search through
2293     int phicount = 0;           // current search count
2294 
2295     bool retry_feed = true;
2296     while (retry_feed) {
2297       // see if we have a direct MergeMem feed
2298       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2299         x = feed->fast_out(i);
2300         // the correct Phi will be merging a Bot memory slice
2301         if (x->is_MergeMem()) {
2302           mm = x->as_MergeMem();
2303           break;
2304         }
2305       }
2306       if (mm) {
2307         retry_feed = false;
2308       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2309         // the barrier may feed indirectly via one or two Phi nodes
2310         PhiNode *phi = NULL;
2311         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2312           x = feed->fast_out(i);
2313           // the correct Phi will be merging a Bot memory slice
2314           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2315             phi = x->as_Phi();
2316             break;
2317           }
2318         }
2319         if (!phi) {
2320           return NULL;
2321         }
2322         // look for another merge below this phi
2323         feed = phi;
2324       } else {
2325         // couldn't find a merge
2326         return NULL;
2327       }
2328     }
2329 
2330     // sanity check this feed turns up as the expected slice
2331     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2332 
2333     MemBarNode *trailing = NULL;
2334     // be sure we have a trailing membar the merge
2335     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2336       x = mm->fast_out(i);
2337       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2338         trailing = x->as_MemBar();
2339         break;
2340       }
2341     }
2342 
2343     return trailing;
2344   }
2345 
2346   // trailing_to_card_mark
2347   //
2348   // graph traversal helper which detects extra, non-normal Mem feed
2349   // from a trailing volatile membar to a preceding card mark volatile
2350   // membar i.e. it identifies whether one of the three possible extra
2351   // GC post-write Mem flow subgraphs is present
2352   //
2353   // this predicate checks for the same flow as the previous predicate
2354   // but starting from the bottom rather than the top.
2355   //
2356   // if the configuration is present returns the card mark membar
2357   // otherwise NULL
2358   //
2359   // n.b. the supplied membar is expected to be a trailing
2360   // MemBarVolatile i.e. the caller must ensure the input node has the
2361   // correct opcode
2362 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bot slice is already a Proj we can go straight to the
    // membar check below; otherwise we need to walk up through Phis
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // classify each input of the Phi: another Phi (to recurse
        // into), a Proj from a MemBarVolatile (our candidate card
        // mark feed) or a Proj from a leading membar
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // not a Phi (or too many Phis, or not G1) -- wrong config
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2439 
2440   // trailing_to_leading
2441   //
2442   // graph traversal helper which checks the Mem flow up the graph
2443   // from a (non-card mark) trailing membar attempting to locate and
2444   // return an associated leading membar. it first looks for a
2445   // subgraph in the normal configuration (relying on helper
2446   // normal_to_leading). failing that it then looks for one of the
2447   // possible post-write card mark subgraphs linking the trailing node
2448   // to a the card mark membar (relying on helper
2449   // trailing_to_card_mark), and then checks that the card mark membar
2450   // is fed by a leading membar (once again relying on auxiliary
2451   // predicate normal_to_leading).
2452   //
  // if the configuration is valid returns the cpuorder membar for
2454   // preference or when absent the release membar otherwise NULL.
2455   //
2456   // n.b. the input membar is expected to be either a volatile or
2457   // acquire membar but in the former case must *not* be a card mark
2458   // membar.
2459 
2460   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2461   {
2462     assert((trailing->Opcode() == Op_MemBarAcquire ||
2463             trailing->Opcode() == Op_MemBarVolatile),
2464            "expecting an acquire or volatile membar");
2465     assert((trailing->Opcode() != Op_MemBarVolatile ||
2466             !is_card_mark_membar(trailing)),
2467            "not expecting a card mark membar");
2468 
2469     MemBarNode *leading = normal_to_leading(trailing);
2470 
2471     if (leading) {
2472       return leading;
2473     }
2474 
2475     // nothing more to do if this is an acquire
2476     if (trailing->Opcode() == Op_MemBarAcquire) {
2477       return NULL;
2478     }
2479 
2480     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2481 
2482     if (!card_mark_membar) {
2483       return NULL;
2484     }
2485 
2486     return normal_to_leading(card_mark_membar);
2487   }
2488 
2489   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2490 
bool unnecessary_acquire(const Node *barrier)
{
  // Predicate controlling emit of ldr<x>/ldar<x> and associated dmb.
  // Returns true when the acquire membar is redundant, i.e. it will be
  // subsumed by an ldar (volatile/unsafe get) or it trails a CAS.
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2598 
2599 bool needs_acquiring_load(const Node *n)
2600 {
2601   assert(n->is_Load(), "expecting a load");
2602   if (UseBarriersForVolatile) {
2603     // we use a normal load and a dmb
2604     return false;
2605   }
2606 
2607   LoadNode *ld = n->as_Load();
2608 
2609   if (!ld->is_acquire()) {
2610     return false;
2611   }
2612 
2613   // check if this load is feeding an acquire membar
2614   //
2615   //   LoadX[mo_acquire]
2616   //   {  |1   }
2617   //   {DecodeN}
2618   //      |Parms
2619   //   MemBarAcquire*
2620   //
2621   // where * tags node we were passed
2622   // and |k means input k
2623 
2624   Node *start = ld;
2625   Node *mbacq = NULL;
2626 
2627   // if we hit a DecodeNarrowPtr we reset the start node and restart
2628   // the search through the outputs
2629  restart:
2630 
2631   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2632     Node *x = start->fast_out(i);
2633     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2634       mbacq = x;
2635     } else if (!mbacq &&
2636                (x->is_DecodeNarrowPtr() ||
2637                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2638       start = x;
2639       goto restart;
2640     }
2641   }
2642 
2643   if (mbacq) {
2644     return true;
2645   }
2646 
2647   // now check for an unsafe volatile get
2648 
2649   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2650   //
2651   //     MemBarCPUOrder
2652   //        ||       \\
2653   //   MemBarAcquire* LoadX[mo_acquire]
2654   //        ||
2655   //   MemBarCPUOrder
2656 
2657   MemBarNode *membar;
2658 
2659   membar = parent_membar(ld);
2660 
2661   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2662     return false;
2663   }
2664 
2665   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2666 
2667   membar = child_membar(membar);
2668 
2669   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2670     return false;
2671   }
2672 
2673   membar = child_membar(membar);
2674 
2675   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2676     return false;
2677   }
2678 
2679   return true;
2680 }
2681 
2682 bool unnecessary_release(const Node *n)
2683 {
2684   assert((n->is_MemBar() &&
2685           n->Opcode() == Op_MemBarRelease),
2686          "expecting a release membar");
2687 
2688   if (UseBarriersForVolatile) {
2689     // we need to plant a dmb
2690     return false;
2691   }
2692 
2693   // if there is a dependent CPUOrder barrier then use that as the
2694   // leading
2695 
2696   MemBarNode *barrier = n->as_MemBar();
2697   // check for an intervening cpuorder membar
2698   MemBarNode *b = child_membar(barrier);
2699   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2700     // ok, so start the check from the dependent cpuorder barrier
2701     barrier = b;
2702   }
2703 
2704   // must start with a normal feed
2705   MemBarNode *child_barrier = leading_to_normal(barrier);
2706 
2707   if (!child_barrier) {
2708     return false;
2709   }
2710 
2711   if (!is_card_mark_membar(child_barrier)) {
2712     // this is the trailing membar and we are done
2713     return true;
2714   }
2715 
2716   // must be sure this card mark feeds a trailing membar
2717   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2718   return (trailing != NULL);
2719 }
2720 
2721 bool unnecessary_volatile(const Node *n)
2722 {
2723   // assert n->is_MemBar();
2724   if (UseBarriersForVolatile) {
2725     // we need to plant a dmb
2726     return false;
2727   }
2728 
2729   MemBarNode *mbvol = n->as_MemBar();
2730 
2731   // first we check if this is part of a card mark. if so then we have
2732   // to generate a StoreLoad barrier
2733 
2734   if (is_card_mark_membar(mbvol)) {
2735       return false;
2736   }
2737 
2738   // ok, if it's not a card mark then we still need to check if it is
2739   // a trailing membar of a volatile put hgraph.
2740 
2741   return (trailing_to_leading(mbvol) != NULL);
2742 }
2743 
2744 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2745 
2746 bool needs_releasing_store(const Node *n)
2747 {
2748   // assert n->is_Store();
2749   if (UseBarriersForVolatile) {
2750     // we use a normal store and dmb combination
2751     return false;
2752   }
2753 
2754   StoreNode *st = n->as_Store();
2755 
2756   // the store must be marked as releasing
2757   if (!st->is_release()) {
2758     return false;
2759   }
2760 
2761   // the store must be fed by a membar
2762 
2763   Node *x = st->lookup(StoreNode::Memory);
2764 
2765   if (! x || !x->is_Proj()) {
2766     return false;
2767   }
2768 
2769   ProjNode *proj = x->as_Proj();
2770 
2771   x = proj->lookup(0);
2772 
2773   if (!x || !x->is_MemBar()) {
2774     return false;
2775   }
2776 
2777   MemBarNode *barrier = x->as_MemBar();
2778 
2779   // if the barrier is a release membar or a cpuorder mmebar fed by a
2780   // release membar then we need to check whether that forms part of a
2781   // volatile put graph.
2782 
2783   // reject invalid candidates
2784   if (!leading_membar(barrier)) {
2785     return false;
2786   }
2787 
2788   // does this lead a normal subgraph?
2789   MemBarNode *mbvol = leading_to_normal(barrier);
2790 
2791   if (!mbvol) {
2792     return false;
2793   }
2794 
2795   // all done unless this is a card mark
2796   if (!is_card_mark_membar(mbvol)) {
2797     return true;
2798   }
2799 
2800   // we found a card mark -- just make sure we have a trailing barrier
2801 
2802   return (card_mark_to_trailing(mbvol) != NULL);
2803 }
2804 
2805 // predicate controlling translation of CAS
2806 //
2807 // returns true if CAS needs to use an acquiring load otherwise false
2808 
bool needs_acquiring_load_exclusive(const Node *n)
{
  // Predicate controlling translation of CAS: returns true if the CAS
  // needs to use an acquiring load, otherwise false.
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // the asserts below validate the expected shape of the CAS subgraph:
  // MemBarRelease -> MemBarCPUOrder -> Proj -> CAS ... MemBarAcquire
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2853 
2854 // predicate controlling translation of StoreCM
2855 //
2856 // returns true if a StoreStore must precede the card write otherwise
2857 // false
2858 
2859 bool unnecessary_storestore(const Node *storecm)
2860 {
2861   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2862 
2863   // we only ever need to generate a dmb ishst between an object put
2864   // and the associated card mark when we are using CMS without
2865   // conditional card marking
2866 
2867   if (!UseConcMarkSweepGC || UseCondCardMark) {
2868     return true;
2869   }
2870 
2871   // if we are implementing volatile puts using barriers then the
2872   // object put as an str so we must insert the dmb ishst
2873 
2874   if (UseBarriersForVolatile) {
2875     return false;
2876   }
2877 
2878   // we can omit the dmb ishst if this StoreCM is part of a volatile
2879   // put because in thta case the put will be implemented by stlr
2880   //
2881   // we need to check for a normal subgraph feeding this StoreCM.
2882   // that means the StoreCM must be fed Memory from a leading membar,
2883   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2884   // leading membar must be part of a normal subgraph
2885 
2886   Node *x = storecm->in(StoreNode::Memory);
2887 
2888   if (!x->is_Proj()) {
2889     return false;
2890   }
2891 
2892   x = x->in(0);
2893 
2894   if (!x->is_MemBar()) {
2895     return false;
2896   }
2897 
2898   MemBarNode *leading = x->as_MemBar();
2899 
2900   // reject invalid candidates
2901   if (!leading_membar(leading)) {
2902     return false;
2903   }
2904 
2905   // we can omit the StoreStore if it is the head of a normal subgraph
2906   return (leading_to_normal(leading) != NULL);
2907 }
2908 
2909 
2910 #define __ _masm.
2911 
2912 // advance declarations for helper functions to convert register
2913 // indices to register objects
2914 
2915 // the ad file has to provide implementations of certain methods
2916 // expected by the generic code
2917 //
2918 // REQUIRED FUNCTIONALITY
2919 
2920 //=============================================================================
2921 
2922 // !!!!! Special hack to get all types of calls to specify the byte offset
2923 //       from the start of the call to the point where the return address
2924 //       will point.
2925 
// Offset from the start of a static Java call to its return address.
int MachCallStaticJavaNode::ret_addr_offset()
{
  // call should be a simple bl
  int off = 4;
  return off;
}

// Offset from the start of a dynamic Java call to its return address.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}

// Offset from the start of a runtime call to its return address.
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blrt rscratch1
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    // target is a generated stub within the code cache
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}
2954 
2955 // Indicate if the safepoint node needs the polling page as an input
2956 
// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
2968 
2969 //=============================================================================
2970 
#ifndef PRODUCT
// Print a textual representation of the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a breakpoint as a brk instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

// Size in bytes of the emitted breakpoint; computed generically.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2985 
2986 //=============================================================================
2987 
#ifndef PRODUCT
  // Print a textual representation of the nop padding node.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions as alignment padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Size in bytes: one instruction per requested nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
3004 
3005 //=============================================================================
// The constant base needs no output register on AArch64.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-allocation expansion is required for the constant base.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called because requires_postalloc_expand() is false
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

// Empty encoding => zero size.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
3030 
3031 #ifndef PRODUCT
// Print a textual representation of the method prolog, mirroring the
// frame-build sequence produced by emit() below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: offset fits in the stp immediate
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: save the pair first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif

// Emit the method prolog: invalidation nop, stack bang, frame build,
// simulator notification, and constant table base setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

// Prolog size varies with frame layout; compute generically.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// No relocatable values in the prolog.
int MachPrologNode::reloc() const
{
  return 0;
}
3099 
3100 //=============================================================================
3101 
3102 #ifndef PRODUCT
// Print a textual representation of the method epilog, mirroring the
// frame-teardown and poll sequence produced by emit() below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: offset fits in the ldp immediate
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore sp via rscratch1 then pop the pair
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif

// Emit the method epilog: frame removal, simulator notification,
// reserved stack check and the safepoint poll on method return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3147 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3169 
3170 //=============================================================================
3171 
3172 // Figure out which register class each belongs in: rc_int, rc_float or
3173 // rc_stack.
3174 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3175 
3176 static enum RC rc_class(OptoReg::Name reg) {
3177 
3178   if (reg == OptoReg::Bad) {
3179     return rc_bad;
3180   }
3181 
3182   // we have 30 int registers * 2 halves
3183   // (rscratch1 and rscratch2 are omitted)
3184 
3185   if (reg < 60) {
3186     return rc_int;
3187   }
3188 
3189   // we have 32 float register * 2 halves
3190   if (reg < 60 + 128) {
3191     return rc_float;
3192   }
3193 
3194   // Between float regs & stack is the flags regs.
3195   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3196 
3197   return rc_stack;
3198 }
3199 
3200 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3201   Compile* C = ra_->C;
3202 
3203   // Get registers to move.
3204   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3205   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3206   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3207   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3208 
3209   enum RC src_hi_rc = rc_class(src_hi);
3210   enum RC src_lo_rc = rc_class(src_lo);
3211   enum RC dst_hi_rc = rc_class(dst_hi);
3212   enum RC dst_lo_rc = rc_class(dst_lo);
3213 
3214   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3215 
3216   if (src_hi != OptoReg::Bad) {
3217     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3218            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3219            "expected aligned-adjacent pairs");
3220   }
3221 
3222   if (src_lo == dst_lo && src_hi == dst_hi) {
3223     return 0;            // Self copy, no move.
3224   }
3225 
3226   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3227               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3228   int src_offset = ra_->reg2offset(src_lo);
3229   int dst_offset = ra_->reg2offset(dst_lo);
3230 
3231   if (bottom_type()->isa_vect() != NULL) {
3232     uint ireg = ideal_reg();
3233     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3234     if (cbuf) {
3235       MacroAssembler _masm(cbuf);
3236       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3237       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3238         // stack->stack
3239         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3240         if (ireg == Op_VecD) {
3241           __ unspill(rscratch1, true, src_offset);
3242           __ spill(rscratch1, true, dst_offset);
3243         } else {
3244           __ spill_copy128(src_offset, dst_offset);
3245         }
3246       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3247         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3248                ireg == Op_VecD ? __ T8B : __ T16B,
3249                as_FloatRegister(Matcher::_regEncode[src_lo]));
3250       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3251         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3252                        ireg == Op_VecD ? __ D : __ Q,
3253                        ra_->reg2offset(dst_lo));
3254       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3255         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3256                        ireg == Op_VecD ? __ D : __ Q,
3257                        ra_->reg2offset(src_lo));
3258       } else {
3259         ShouldNotReachHere();
3260       }
3261     }
3262   } else if (cbuf) {
3263     MacroAssembler _masm(cbuf);
3264     switch (src_lo_rc) {
3265     case rc_int:
3266       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3267         if (is64) {
3268             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3269                    as_Register(Matcher::_regEncode[src_lo]));
3270         } else {
3271             MacroAssembler _masm(cbuf);
3272             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3273                     as_Register(Matcher::_regEncode[src_lo]));
3274         }
3275       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3276         if (is64) {
3277             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3278                      as_Register(Matcher::_regEncode[src_lo]));
3279         } else {
3280             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3281                      as_Register(Matcher::_regEncode[src_lo]));
3282         }
3283       } else {                    // gpr --> stack spill
3284         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3285         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3286       }
3287       break;
3288     case rc_float:
3289       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3290         if (is64) {
3291             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3292                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3293         } else {
3294             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3295                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3296         }
3297       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3298           if (cbuf) {
3299             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3300                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3301         } else {
3302             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3303                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3304         }
3305       } else {                    // fpr --> stack spill
3306         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3307         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3308                  is64 ? __ D : __ S, dst_offset);
3309       }
3310       break;
3311     case rc_stack:
3312       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3313         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3314       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3315         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3316                    is64 ? __ D : __ S, src_offset);
3317       } else {                    // stack --> stack copy
3318         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3319         __ unspill(rscratch1, is64, src_offset);
3320         __ spill(rscratch1, is64, dst_offset);
3321       }
3322       break;
3323     default:
3324       assert(false, "bad rc_class for spill");
3325       ShouldNotReachHere();
3326     }
3327   }
3328 
3329   if (st) {
3330     st->print("spill ");
3331     if (src_lo_rc == rc_stack) {
3332       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3333     } else {
3334       st->print("%s -> ", Matcher::regName[src_lo]);
3335     }
3336     if (dst_lo_rc == rc_stack) {
3337       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3338     } else {
3339       st->print("%s", Matcher::regName[dst_lo]);
3340     }
3341     if (bottom_type()->isa_vect() != NULL) {
3342       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3343     } else {
3344       st->print("\t# spill size = %d", is64 ? 64:32);
3345     }
3346   }
3347 
3348   return 0;
3349 
3350 }
3351 
3352 #ifndef PRODUCT
// Format by delegating to implementation() in print-only mode.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

// Emit by delegating to implementation() in emit-only mode.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Spill copy size varies with register classes; compute generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3368 
3369 //=============================================================================
3370 
#ifndef PRODUCT
// Print the box-lock address computation.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  // NOTE(review): the format string has a stray ']' and prints "rsp"
  // rather than "sp" -- display only, but looks like a copy/paste
  // from x86; confirm intended output
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

// Materialize the stack address of the box lock slot in its register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // size() below assumes a single 4-byte add instruction
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3397 
3398 //=============================================================================
3399 
#ifndef PRODUCT
// Print the unverified entry point's inline cache check.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

// Emit the inline cache check: compare the receiver's klass with the
// cached klass and jump to the ic miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Entry point size varies with compressed-klass mode; compute
// generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3435 
3436 // REQUIRED EMIT CODE
3437 
3438 //=============================================================================
3439 
3440 // Emit exception handler code.
3441 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3442 {
3443   // mov rscratch1 #exception_blob_entry_point
3444   // br rscratch1
3445   // Note that the code buffer's insts_mark is always relative to insts.
3446   // That's why we must use the macroassembler to generate a handler.
3447   MacroAssembler _masm(&cbuf);
3448   address base = __ start_a_stub(size_exception_handler());
3449   if (base == NULL) {
3450     ciEnv::current()->record_failure("CodeCache is full");
3451     return 0;  // CodeBuffer::expand failed
3452   }
3453   int offset = __ offset();
3454   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3455   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3456   __ end_a_stub();
3457   return offset;
3458 }
3459 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 on
// code-cache exhaustion (failure is recorded on the ciEnv).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the current code position (the address of the adr
  // instruction itself), then jump to the deopt blob's unpack entry.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3480 
3481 // REQUIRED MATCHER CODE
3482 
3483 //=============================================================================
3484 
3485 const bool Matcher::match_rule_supported(int opcode) {
3486 
3487   switch (opcode) {
3488   default:
3489     break;
3490   }
3491 
3492   if (!has_match_rule(opcode)) {
3493     return false;
3494   }
3495 
3496   return true;  // Per default match rules are supported.
3497 }
3498 
3499 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3500 
3501   // TODO
3502   // identify extra cases that we might want to provide match rules for
3503   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3504   bool ret_value = match_rule_supported(opcode);
3505   // Add rules here.
3506 
3507   return ret_value;  // Per default match rules are supported.
3508 }
3509 
// Predicated (masked) vector operations are not supported by this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Register-pressure threshold for floats: use the default unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
3517 
// Not meaningful on this platform; deliberately left unimplemented
// (calling it is a fatal error via Unimplemented()).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3523 
3524 // Is this branch offset short enough that a short branch can be used?
3525 //
3526 // NOTE: If the platform does not provide any short branch variants, then
3527 //       this method should return false for offset 0.
3528 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3529   // The passed offset is relative to address of the branch.
3530 
3531   return (-32768 <= offset && offset < 32768);
3532 }
3533 
// Is a 64-bit constant cheap enough to materialize inline?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3544 
3545 // Vector width in bytes.
3546 const int Matcher::vector_width_in_bytes(BasicType bt) {
3547   int size = MIN2(16,(int)MaxVectorSize);
3548   // Minimum 2 values in vector
3549   if (size < 2*type2aelembytes(bt)) size = 0;
3550   // But never < 4
3551   if (size < 4) size = 0;
3552   return size;
3553 }
3554 
3555 // Limits on vector size (number of elements) loaded into vector.
3556 const int Matcher::max_vector_size(const BasicType bt) {
3557   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3558 }
3559 const int Matcher::min_vector_size(const BasicType bt) {
3560 //  For the moment limit the vector size to 8 bytes
3561     int size = 8 / type2aelembytes(bt);
3562     if (size < 2) size = 2;
3563     return size;
3564 }
3565 
3566 // Vector ideal reg.
3567 const uint Matcher::vector_ideal_reg(int len) {
3568   switch(len) {
3569     case  8: return Op_VecD;
3570     case 16: return Op_VecX;
3571   }
3572   ShouldNotReachHere();
3573   return 0;
3574 }
3575 
// Vector shift counts are always kept in a full 128-bit (X) vector
// register, regardless of the operand size.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3584 
// Misaligned vector loads/stores are permitted unless the AlignVector
// flag demands aligned accesses.  (Comment previously said "x86"; this
// is the AArch64 port.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3589 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Use conditional move for floats as well.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3603 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false: the hardware already uses only the low bits of the count.
const bool Matcher::need_masked_shift_count = false;
3610 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable/safe when narrow oops are unshifted.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3630 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  // (Simple mode == zero heap base.)
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // (Simple mode == zero klass base.)
  return Universe::narrow_klass_base() == NULL;
}
3640 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
// false on this port: see rationale above.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
3653 
// Should never be reached on AArch64; deliberately unimplemented.
// (The previous "No-op on amd64" comment was stale — the body is a
// fatal Unimplemented(), not a no-op.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3658 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  Not needed on this port.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3672 
3673 // Return whether or not this register is ever used as an argument.
3674 // This function is used on startup to build the trampoline stubs in
3675 // generateOptoStub.  Registers not mentioned will be killed by the VM
3676 // call in the trampoline, and arguments in those registers not be
3677 // available to the callee.
3678 bool Matcher::can_be_java_arg(int reg)
3679 {
3680   return
3681     reg ==  R0_num || reg == R0_H_num ||
3682     reg ==  R1_num || reg == R1_H_num ||
3683     reg ==  R2_num || reg == R2_H_num ||
3684     reg ==  R3_num || reg == R3_H_num ||
3685     reg ==  R4_num || reg == R4_H_num ||
3686     reg ==  R5_num || reg == R5_H_num ||
3687     reg ==  R6_num || reg == R6_H_num ||
3688     reg ==  R7_num || reg == R7_H_num ||
3689     reg ==  V0_num || reg == V0_H_num ||
3690     reg ==  V1_num || reg == V1_H_num ||
3691     reg ==  V2_num || reg == V2_H_num ||
3692     reg ==  V3_num || reg == V3_H_num ||
3693     reg ==  V4_num || reg == V4_H_num ||
3694     reg ==  V5_num || reg == V5_H_num ||
3695     reg ==  V6_num || reg == V6_H_num ||
3696     reg ==  V7_num || reg == V7_H_num;
3697 }
3698 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// No hand-written assembler sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3707 
// Register for DIVI projection of divmodI.
// Fused div/mod nodes are not used on this port, so none of these
// projection masks should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Mask of the register that preserves SP across a MethodHandle invoke:
// the frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3734 
3735 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3736   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3737     Node* u = addp->fast_out(i);
3738     if (u->is_Mem()) {
3739       int opsize = u->as_Mem()->memory_size();
3740       assert(opsize > 0, "unexpected memory operand size");
3741       if (u->as_Mem()->memory_size() != (1<<shift)) {
3742         return false;
3743       }
3744     }
3745   }
3746   return true;
3747 }
3748 
// false: ConvI2L nodes need not preserve their input's exact type
// (they may be subsumed into addressing modes — see
// clone_address_expressions below).
const bool Matcher::convi2l_type_required = false;
3750 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Recognizes two offset shapes that AArch64 addressing modes can
// absorb directly: (LShiftL (ConvI2L x) con) / (LShiftL x con), and a
// bare (ConvI2L x).  Matched sub-nodes are flagged in address_visited
// so the matcher folds them into the memory operand instead of
// computing them into registers.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      // The shift amount must match the operand size of every memory use.
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    // NOTE(review): test_set here vs. plain set for off/conv above —
    // looks intentional (m may already be flagged) but worth confirming.
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3791 
// Transform:
// (AddP base (AddP base address (LShiftL index con)) offset)
// into:
// (AddP base (AddP base offset) (LShiftL index con))
// to take full advantage of ARM's addressing modes
void Compile::reshape_address(AddPNode* addp) {
  Node *addr = addp->in(AddPNode::Address);
  // Only reshape when the inner node is an AddP sharing our base.
  if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
    const AddPNode *addp2 = addr->as_AddP();
    if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
         addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
         size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
        addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {

      // Any use that can't embed the address computation?
      for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
        Node* u = addp->fast_out(i);
        if (!u->is_Mem() || u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
          return;
        }
      }

      Node* off = addp->in(AddPNode::Offset);
      Node* addr2 = addp2->in(AddPNode::Address);
      Node* base = addp->in(AddPNode::Base);

      Node* new_addr = NULL;
      // Check whether the graph already has the new AddP we need
      // before we create one (no GVN available here).
      for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
        Node* u = addr2->fast_out(i);
        if (u->is_AddP() &&
            u->in(AddPNode::Base) == base &&
            u->in(AddPNode::Address) == addr2 &&
            u->in(AddPNode::Offset) == off) {
          new_addr = u;
          break;
        }
      }

      if (new_addr == NULL) {
        new_addr = new AddPNode(base, addr2, off);
      }
      // Swap the offsets: addp now addresses through (base + addr2 + off)
      // and carries the shifted index as its own offset.
      Node* new_off = addp2->in(AddPNode::Offset);
      addp->set_req(AddPNode::Address, new_addr);
      if (addr->outcnt() == 0) {
        // The inner AddP became dead; detach its inputs.
        addr->disconnect_inputs(NULL, this);
      }
      addp->set_req(AddPNode::Offset, new_off);
      if (off->outcnt() == 0) {
        off->disconnect_inputs(NULL, this);
      }
    }
  }
}
3847 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count argument slots by basic type.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so FP arguments are also counted
      // in gps — looks deliberate (every arg occupies a slot), but
      // worth confirming against the blrt simulator convention.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Map the return type onto the simulator's return-type codes.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // Any other type (oop, int, long, ...) returns in a GP register.
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3889 
// Emit a volatile (acquire/release) load or store.  Only plain
// base-register addressing is legal for the ldar/stlr family, so any
// index/scale/displacement is rejected with a guarantee().  Note that
// _masm is declared before the braces so that later statements in the
// same enc_class can keep using the __ shorthand.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Pointer-to-member types for the three flavours of memory-access
// emitters used by loadStore() below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3903 
3904   // Used for all non-volatile memory accesses.  The use of
3905   // $mem->opcode() to discover whether this pattern uses sign-extended
3906   // offsets is something of a kludge.
3907   static void loadStore(MacroAssembler masm, mem_insn insn,
3908                          Register reg, int opcode,
3909                          Register base, int index, int size, int disp)
3910   {
3911     Address::extend scale;
3912 
3913     // Hooboy, this is fugly.  We need a way to communicate to the
3914     // encoder that the index needs to be sign extended, so we have to
3915     // enumerate all the cases.
3916     switch (opcode) {
3917     case INDINDEXSCALEDI2L:
3918     case INDINDEXSCALEDI2LN:
3919     case INDINDEXI2L:
3920     case INDINDEXI2LN:
3921       scale = Address::sxtw(size);
3922       break;
3923     default:
3924       scale = Address::lsl(size);
3925     }
3926 
3927     if (index == -1) {
3928       (masm.*insn)(reg, Address(base, disp));
3929     } else {
3930       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3931       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3932     }
3933   }
3934 
3935   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3936                          FloatRegister reg, int opcode,
3937                          Register base, int index, int size, int disp)
3938   {
3939     Address::extend scale;
3940 
3941     switch (opcode) {
3942     case INDINDEXSCALEDI2L:
3943     case INDINDEXSCALEDI2LN:
3944       scale = Address::sxtw(size);
3945       break;
3946     default:
3947       scale = Address::lsl(size);
3948     }
3949 
3950      if (index == -1) {
3951       (masm.*insn)(reg, Address(base, disp));
3952     } else {
3953       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3954       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3955     }
3956   }
3957 
3958   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3959                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3960                          int opcode, Register base, int index, int size, int disp)
3961   {
3962     if (index == -1) {
3963       (masm.*insn)(reg, T, Address(base, disp));
3964     } else {
3965       assert(disp == 0, "unsupported address mode");
3966       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3967     }
3968   }
3969 
3970 %}
3971 
3972 
3973 
3974 //----------ENCODING BLOCK-----------------------------------------------------
3975 // This block specifies the encoding classes used by the compiler to
3976 // output byte streams.  Encoding classes are parameterized macros
3977 // used by Machine Instruction Nodes in order to generate the bit
3978 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3982 // which returns its register number when queried.  CONST_INTER causes
3983 // an operand to generate a function which returns the value of the
3984 // constant when queried.  MEMORY_INTER causes an operand to generate
3985 // four functions which return the Base Register, the Index Register,
3986 // the Scale Value, and the Offset Value of the operand when queried.
3987 // COND_INTER causes an operand to generate six functions which return
3988 // the encoding code (ie - encoding bits for the instruction)
3989 // associated with each basic boolean condition for a conditional
3990 // instruction.
3991 //
3992 // Instructions specify two basic values for encoding.  Again, a
3993 // function is available to check if the constant displacement is an
3994 // oop. They use the ins_encode keyword to specify their encoding
3995 // classes (which must be a sequence of enc_class names, and their
3996 // parameters, specified in the encoding block), and they use the
3997 // opcode keyword to specify, in order, their primary, secondary, and
3998 // tertiary opcode.  Only the opcode sections which a particular
3999 // instruction needs for encoding need to be specified.
4000 encode %{
4001   // Build emit functions for each basic byte or larger field in the
4002   // intel encoding scheme (opcode, rm, sib, immediate), and call them
4003   // from C++ code in the enc_class source block.  Emit functions will
4004   // live in the main source block for now.  In future, we can
4005   // generalize this by adding a syntax that specifies the sizes of
4006   // fields in an order, so that the adlc can build the emit functions
4007   // automagically
4008 
  // catch all for unimplemented encodings: stops the JIT with an
  // "unimplemented" trap rather than emitting garbage.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
4014 
  // BEGIN Non-volatile memory access

  // Each load encoding forwards the operand's base/index/scale/disp to
  // loadStore() above, which selects the addressing mode;
  // $mem->opcode() tells it whether the index needs sign extension.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Overload resolved by operand signature (iRegL destination).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: S/D/Q select 32/64/128-bit register variants.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4118 
  // Store encodings.  The *0 variants store the zero register (zr)
  // directly, avoiding the need to materialize a zero constant.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero-byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S/D/Q select 32/64/128-bit register variants.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
4213 
4214   // volatile loads and stores
4215 
  // Store-release (stlr*) encodings for byte/halfword/word. MOV_VOLATILE
  // (a macro defined earlier in this file) emits the named instruction,
  // using rscratch1 as a scratch register — presumably to materialize the
  // address when the operand is not base-register-only; see the macro.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
4230 
4231 
  // Load-acquire (ldar*) encodings. The signed variants load with the
  // sub-word acquire instruction and then sign-extend in place with the
  // appropriate sxt* instruction.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): this enc_class shares the name aarch64_enc_ldarw with
  // the iRegI variant above (only the dst operand type differs) — this
  // mirrors the upstream source; confirm adlc resolves it as intended.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
4294 
  // Volatile FP loads: there is no FP load-acquire, so load-acquire into
  // the integer scratch register and fmov the bits across to the FP
  // destination (32-bit via ldarw/fmovs, 64-bit via ldar/fmovd).
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4306 
  // 64-bit store-release. Special-cases a source of r31_sp: sp cannot be
  // stored directly, so it is first copied into rscratch2.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile FP stores: no FP store-release exists, so fmov the bits
  // into rscratch2 and release-store that (stlrw for 32-bit, stlr for
  // 64-bit). The inner scope limits the lifetime of the local _masm
  // before the MOV_VOLATILE macro creates its own.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4340 
4341   // synchronized read/update encodings
4342 
  // Load-acquire-exclusive of a 64-bit value. LDAXR only accepts a bare
  // base register, so any index/displacement is folded into rscratch1
  // with lea before the exclusive load.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;   // -1 means no index register
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp first, then add the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4371 
  // Store-release-exclusive of a 64-bit value. Like LDAXR above, STLXR
  // takes only a base register, so the address is formed in rscratch2.
  // The exclusive-store status is written to rscratch1 (0 on success);
  // the trailing cmpw makes flags EQ signal success to the matcher.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;   // -1 means no index register
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp first, then add the scaled index
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4401 
  // Compare-and-exchange encodings. All four delegate to
  // MacroAssembler::cmpxchg and require a plain base-register address
  // (no index, no displacement), enforced by the guarantee. xword = 64-bit
  // operand, word = 32-bit. A noreg result means only flags are produced.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4438 
4439 
  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if flags are EQ (CAS succeeded), else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
4446 
4447   // prefetch encodings
4448 
  // Prefetch-for-store (PSTL1KEEP) of the resolved memory operand.
  // PRFM accepts base+disp or base+scaled-index addressing directly;
  // only the base+disp+index combination needs an lea into rscratch1.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;   // -1 means no index register
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4467 
  /// mov encodings
4469 
  // 32-bit immediate move; zero is materialized from the zero register.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; zero is materialized from the zero register.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
4491 
  // Materialize a pointer constant, dispatching on its relocation type:
  // oops via movoop, metadata via mov_metadata, and unrelocated addresses
  // either as a plain immediate (below the VM page size) or via
  // adrp + add for page-relative addressing. NULL and the value 1 are
  // handled by dedicated encodings below, hence ShouldNotReachHere here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Null pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant one.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}
4528 
  // Load the address of the safepoint polling page with a poll_type
  // relocation. adrp reaches the page directly; the asserted zero offset
  // relies on the polling page being page-aligned.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base via the MacroAssembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4542 
  // Materialize a narrow (compressed) oop constant; NULL is handled by
  // the immN0 encoding below.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow null.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Materialize a narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4574 
4575   // arithmetic encodings
4576 
  // Add/subtract an immediate, 32-bit form. A single encoding serves
  // both operations: $primary distinguishes them (add == 0, sub == 1)
  // by negating the constant, and a negative constant is then emitted
  // as the opposite instruction with the magnitude.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit form of the above; the constant still fits in 32 bits
  // (immLAddSub operand).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4604 
  // Integer divide/modulo via the corrected_idiv* helpers, which handle
  // the Java semantics (e.g. MIN_VALUE / -1). The boolean argument
  // appears to select remainder (true) vs quotient (false) — the div
  // encodings pass false, the mod encodings pass true; see the helper.
  // NOTE(review): aarch64_enc_div/aarch64_enc_mod declare iRegI operands
  // yet emit the 64-bit corrected_idivq — this matches upstream, but
  // confirm the operand declarations are only informational to adlc.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4636 
4637   // compare instruction encodings
4638 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: discard the
  // result into zr, flipping sub/add for negative constants so the
  // immediate is always non-negative.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate. val == -val only
  // for Long.MIN_VALUE, which cannot be negated, so it is materialized
  // via orr instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full width).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow (compressed) oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test: compare against the zero register.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4720 
  // Unconditional branch to the instruct's label operand.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes straight from the
  // cmpOp operand's cmpcode.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Same as above for the unsigned comparison operand type (cmpOpU).
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
4738 
  // Slow-path subtype check via check_klass_subtype_slow_path, with
  // condition codes set for the matcher. When $primary is set, the
  // result register is cleared on the hit path before binding the
  // miss label.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4756 
  // Emit a Java static call. With no resolved _method this is a call to
  // a runtime wrapper; otherwise a (possibly optimized-virtual) static
  // call plus its to-interpreter stub. Either trampoline_call or the
  // stub emission can fail when the code cache is full, in which case
  // the compile is bailed out via record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Emit an inline-cache dynamic (virtual) call; bails out the compile
  // if the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call code; the VerifyStackAtCalls check is unimplemented on
  // this port.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4801 
  // Call from compiled Java code into the runtime. Targets inside the
  // code cache are reached with a trampoline call; anything else goes
  // through blrt with the absolute address in a register, bracketed by
  // a manually-pushed return-address breadcrumb for stack walking.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // pop the breadcrumb pushed above
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4832 
  // Jump to the shared rethrow stub.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Plain method return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: jump to the target register.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4858 
  // Inline fast path for monitorenter (FastLock). Emits, in order:
  // optional biased-locking attempt, inflated-monitor detection, the
  // stack-lock CAS on the object header, the recursive stack-lock test,
  // and (unless disabled by EmitSync) the inflated-monitor owner CAS.
  // On exit, flags EQ means the lock was acquired; NE means the slow
  // path must be taken.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is non-null here, so this comparison forces NE (slow path).
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single compare-and-swap-acquire-release instruction.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC path: ldaxr/stlxr retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // status 0 == store-exclusive succeeded; otherwise retry.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
5013 
5014   // TODO
5015   // reimplement this with custom cmpxchgptr code
5016   // which avoids some of the unnecessary branching
  // Fast-path monitor exit (fast_unlock).  Attempts the lightweight
  // (stack-lock) unlock first and falls through to the inflated-monitor
  // exit path when a monitor is present.  On exit the condition flags
  // encode the result:
  //   flag == EQ  => unlock succeeded
  //   flag == NE  => caller must take the slow path
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);      // object being unlocked
    Register box = as_Register($box$$reg);         // on-stack BasicLock
    Register disp_hdr = as_Register($tmp$$reg);    // displaced header / scratch
    Register tmp = as_Register($tmp2$$reg);        // mark word, later monitor ptr
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // Load the mark word; tmp becomes the monitor pointer on the
      // object_has_monitor path below.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      // NOTE(review): the monitor bit is tested in the saved displaced
      // header rather than the freshly loaded mark word — this relies on
      // fast_lock having stored the monitor-tagged mark into the box;
      // confirm against aarch64_enc_fast_lock.
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // CAS (release): if the mark still equals our box, install the
        // displaced header.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);  // EQ iff the CAS observed our box
      } else {
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // Store-exclusive status 0 => success; flags still EQ from the
        // cmp above, so we are done.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // Lightweight path finished (flags already hold the result); skip
      // over the inflated-monitor code.
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont); // not owner, or recursive: NE => slow path

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr); // sets the result flags (NE => waiters, slow path)
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5112 
5113 %}
5114 
5115 //----------FRAME--------------------------------------------------------------
5116 // Definition of frame structure and management information.
5117 //
5118 //  S T A C K   L A Y O U T    Allocators stack-slot number
5119 //                             |   (to get allocators register number
5120 //  G  Owned by    |        |  v    add OptoReg::stack0())
5121 //  r   CALLER     |        |
5122 //  o     |        +--------+      pad to even-align allocators stack-slot
5123 //  w     V        |  pad0  |        numbers; owned by CALLER
5124 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5125 //  h     ^        |   in   |  5
5126 //        |        |  args  |  4   Holes in incoming args owned by SELF
5127 //  |     |        |        |  3
5128 //  |     |        +--------+
5129 //  V     |        | old out|      Empty on Intel, window on Sparc
5130 //        |    old |preserve|      Must be even aligned.
5131 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5132 //        |        |   in   |  3   area for Intel ret address
5133 //     Owned by    |preserve|      Empty on Sparc.
5134 //       SELF      +--------+
5135 //        |        |  pad2  |  2   pad to align old SP
5136 //        |        +--------+  1
5137 //        |        | locks  |  0
5138 //        |        +--------+----> OptoReg::stack0(), even aligned
5139 //        |        |  pad1  | 11   pad to align new SP
5140 //        |        +--------+
5141 //        |        |        | 10
5142 //        |        | spills |  9   spills
5143 //        V        |        |  8   (pad0 slot for callee)
5144 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5145 //        ^        |  out   |  7
5146 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5147 //     Owned by    +--------+
5148 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5149 //        |    new |preserve|      Must be even-aligned.
5150 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5151 //        |        |        |
5152 //
5153 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5154 //         known from SELF's arguments and the Java calling convention.
5155 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
5163 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5164 //         even aligned with pad0 as needed.
5165 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5166 //           (the latter is true on Intel but is it false on AArch64?)
5167 //         region 6-11 is even aligned; it may be padded out more so that
5168 //         the region from SP to FP meets the minimum stack alignment.
5169 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5170 //         alignment.  Region 11, pad1, may be dynamically extended so that
5171 //         SP meets the minimum alignment.
5172 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half return register for each ideal register type,
    // indexed by ideal register opcode.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-half return register (OptoReg::Bad for 32 bit values).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5276 
5277 //----------ATTRIBUTES---------------------------------------------------------
5278 //----------Operand Attributes-------------------------------------------------
// Attribute definitions consumed by the ADLC for every operand /
// instruction that does not override them.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5294 
5295 //----------OPERANDS-----------------------------------------------------------
5296 // Operand definitions must precede instruction definitions for correct parsing
5297 // in the ADLC because operands constitute user defined types which are used in
5298 // instruction definitions.
5299 
5300 //----------Simple Operands----------------------------------------------------
5301 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5465 
// 64 bit constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits (2^k - 1) with the top
// two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits (2^k - 1) with the top
// two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5517 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset as a 64 bit constant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5571 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte (word) scaled or unscaled access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte (doubleword) scaled or unscaled access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte (quadword) scaled or unscaled access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As immIOffset but taken from a 64 bit constant
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit offset valid for a 4 byte scaled or unscaled access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit offset valid for an 8 byte scaled or unscaled access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit offset valid for a 16 byte scaled or unscaled access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5652 
// 32 bit integer valid for add sub immediate
// (encodable in an AArch64 ADD/SUB immediate field)
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5674 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset of last_Java_pc within the thread's frame anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5761 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5843 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as a floating-point move immediate
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as a floating-point move immediate
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5904 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5935 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
5979 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6012 
// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6096 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6151 
// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6196 
6197 
6198 // Pointer Register Operands
6199 // Narrow Pointer Register
6200 operand iRegN()
6201 %{
6202   constraint(ALLOC_IN_RC(any_reg32));
6203   match(RegN);
6204   match(iRegNNoSp);
6205   op_cost(0);
6206   format %{ %}
6207   interface(REG_INTER);
6208 %}
6209 
6210 operand iRegN_R0()
6211 %{
6212   constraint(ALLOC_IN_RC(r0_reg));
6213   match(iRegN);
6214   op_cost(0);
6215   format %{ %}
6216   interface(REG_INTER);
6217 %}
6218 
6219 operand iRegN_R2()
6220 %{
6221   constraint(ALLOC_IN_RC(r2_reg));
6222   match(iRegN);
6223   op_cost(0);
6224   format %{ %}
6225   interface(REG_INTER);
6226 %}
6227 
6228 operand iRegN_R3()
6229 %{
6230   constraint(ALLOC_IN_RC(r3_reg));
6231   match(iRegN);
6232   op_cost(0);
6233   format %{ %}
6234   interface(REG_INTER);
6235 %}
6236 
// Narrow Pointer (compressed oop) 32 bit Register not Special
// n.b. the previous header said "Integer 64 bit Register" but this
// operand matches RegN and allocates in the 32-bit no_special_reg32 class
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6246 
6247 // heap base register -- used for encoding immN0
6248 
6249 operand iRegIHeapbase()
6250 %{
6251   constraint(ALLOC_IN_RC(heapbase_reg));
6252   match(RegI);
6253   op_cost(0);
6254   format %{ %}
6255   interface(REG_INTER);
6256 %}
6257 
6258 // Float Register
6259 // Float register operands
6260 operand vRegF()
6261 %{
6262   constraint(ALLOC_IN_RC(float_reg));
6263   match(RegF);
6264 
6265   op_cost(0);
6266   format %{ %}
6267   interface(REG_INTER);
6268 %}
6269 
6270 // Double Register
6271 // Double register operands
6272 operand vRegD()
6273 %{
6274   constraint(ALLOC_IN_RC(double_reg));
6275   match(RegD);
6276 
6277   op_cost(0);
6278   format %{ %}
6279   interface(REG_INTER);
6280 %}
6281 
6282 operand vecD()
6283 %{
6284   constraint(ALLOC_IN_RC(vectord_reg));
6285   match(VecD);
6286 
6287   op_cost(0);
6288   format %{ %}
6289   interface(REG_INTER);
6290 %}
6291 
6292 operand vecX()
6293 %{
6294   constraint(ALLOC_IN_RC(vectorx_reg));
6295   match(VecX);
6296 
6297   op_cost(0);
6298   format %{ %}
6299   interface(REG_INTER);
6300 %}
6301 
6302 operand vRegD_V0()
6303 %{
6304   constraint(ALLOC_IN_RC(v0_reg));
6305   match(RegD);
6306   op_cost(0);
6307   format %{ %}
6308   interface(REG_INTER);
6309 %}
6310 
6311 operand vRegD_V1()
6312 %{
6313   constraint(ALLOC_IN_RC(v1_reg));
6314   match(RegD);
6315   op_cost(0);
6316   format %{ %}
6317   interface(REG_INTER);
6318 %}
6319 
6320 operand vRegD_V2()
6321 %{
6322   constraint(ALLOC_IN_RC(v2_reg));
6323   match(RegD);
6324   op_cost(0);
6325   format %{ %}
6326   interface(REG_INTER);
6327 %}
6328 
6329 operand vRegD_V3()
6330 %{
6331   constraint(ALLOC_IN_RC(v3_reg));
6332   match(RegD);
6333   op_cost(0);
6334   format %{ %}
6335   interface(REG_INTER);
6336 %}
6337 
6338 // Flags register, used as output of signed compare instructions
6339 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
6342 // that ordered inequality tests use GT, GE, LT or LE none of which
6343 // pass through cases where the result is unordered i.e. one or both
6344 // inputs to the compare is a NaN. this means that the ideal code can
6345 // replace e.g. a GT with an LE and not end up capturing the NaN case
6346 // (where the comparison should always fail). EQ and NE tests are
6347 // always generated in ideal code so that unordered folds into the NE
6348 // case, matching the behaviour of AArch64 NE.
6349 //
6350 // This differs from x86 where the outputs of FP compares use a
6351 // special FP flags registers and where compares based on this
6352 // register are distinguished into ordered inequalities (cmpOpUCF) and
6353 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6354 // to explicitly handle the unordered case in branches. x86 also has
6355 // to include extra CMoveX rules to accept a cmpOpUCF input.
6356 
6357 operand rFlagsReg()
6358 %{
6359   constraint(ALLOC_IN_RC(int_flags));
6360   match(RegFlags);
6361 
6362   op_cost(0);
6363   format %{ "RFLAGS" %}
6364   interface(REG_INTER);
6365 %}
6366 
6367 // Flags register, used as output of unsigned compare instructions
6368 operand rFlagsRegU()
6369 %{
6370   constraint(ALLOC_IN_RC(int_flags));
6371   match(RegFlags);
6372 
6373   op_cost(0);
6374   format %{ "RFLAGSU" %}
6375   interface(REG_INTER);
6376 %}
6377 
6378 // Special Registers
6379 
6380 // Method Register
6381 operand inline_cache_RegP(iRegP reg)
6382 %{
6383   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6384   match(reg);
6385   match(iRegPNoSp);
6386   op_cost(0);
6387   format %{ %}
6388   interface(REG_INTER);
6389 %}
6390 
6391 operand interpreter_method_oop_RegP(iRegP reg)
6392 %{
6393   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6394   match(reg);
6395   match(iRegPNoSp);
6396   op_cost(0);
6397   format %{ %}
6398   interface(REG_INTER);
6399 %}
6400 
// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously said link_reg, a copy-paste from lr_RegP)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6410 
6411 operand lr_RegP(iRegP reg)
6412 %{
6413   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6414   match(reg);
6415   op_cost(0);
6416   format %{ %}
6417   interface(REG_INTER);
6418 %}
6419 
6420 //----------Memory Operands----------------------------------------------------
6421 
// Simple base-register-only addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // 0xffffffff is the ADLC sentinel for "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}
6435 
6436 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6437 %{
6438   constraint(ALLOC_IN_RC(ptr_reg));
6439   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6440   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6441   op_cost(0);
6442   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6443   interface(MEMORY_INTER) %{
6444     base($reg);
6445     index($ireg);
6446     scale($scale);
6447     disp(0x0);
6448   %}
6449 %}
6450 
6451 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6452 %{
6453   constraint(ALLOC_IN_RC(ptr_reg));
6454   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6455   match(AddP reg (LShiftL lreg scale));
6456   op_cost(0);
6457   format %{ "$reg, $lreg lsl($scale)" %}
6458   interface(MEMORY_INTER) %{
6459     base($reg);
6460     index($lreg);
6461     scale($scale);
6462     disp(0x0);
6463   %}
6464 %}
6465 
6466 operand indIndexI2L(iRegP reg, iRegI ireg)
6467 %{
6468   constraint(ALLOC_IN_RC(ptr_reg));
6469   match(AddP reg (ConvI2L ireg));
6470   op_cost(0);
6471   format %{ "$reg, $ireg, 0, I2L" %}
6472   interface(MEMORY_INTER) %{
6473     base($reg);
6474     index($ireg);
6475     scale(0x0);
6476     disp(0x0);
6477   %}
6478 %}
6479 
6480 operand indIndex(iRegP reg, iRegL lreg)
6481 %{
6482   constraint(ALLOC_IN_RC(ptr_reg));
6483   match(AddP reg lreg);
6484   op_cost(0);
6485   format %{ "$reg, $lreg" %}
6486   interface(MEMORY_INTER) %{
6487     base($reg);
6488     index($lreg);
6489     scale(0x0);
6490     disp(0x0);
6491   %}
6492 %}
6493 
6494 operand indOffI(iRegP reg, immIOffset off)
6495 %{
6496   constraint(ALLOC_IN_RC(ptr_reg));
6497   match(AddP reg off);
6498   op_cost(0);
6499   format %{ "[$reg, $off]" %}
6500   interface(MEMORY_INTER) %{
6501     base($reg);
6502     index(0xffffffff);
6503     scale(0x0);
6504     disp($off);
6505   %}
6506 %}
6507 
6508 operand indOffI4(iRegP reg, immIOffset4 off)
6509 %{
6510   constraint(ALLOC_IN_RC(ptr_reg));
6511   match(AddP reg off);
6512   op_cost(0);
6513   format %{ "[$reg, $off]" %}
6514   interface(MEMORY_INTER) %{
6515     base($reg);
6516     index(0xffffffff);
6517     scale(0x0);
6518     disp($off);
6519   %}
6520 %}
6521 
6522 operand indOffI8(iRegP reg, immIOffset8 off)
6523 %{
6524   constraint(ALLOC_IN_RC(ptr_reg));
6525   match(AddP reg off);
6526   op_cost(0);
6527   format %{ "[$reg, $off]" %}
6528   interface(MEMORY_INTER) %{
6529     base($reg);
6530     index(0xffffffff);
6531     scale(0x0);
6532     disp($off);
6533   %}
6534 %}
6535 
6536 operand indOffI16(iRegP reg, immIOffset16 off)
6537 %{
6538   constraint(ALLOC_IN_RC(ptr_reg));
6539   match(AddP reg off);
6540   op_cost(0);
6541   format %{ "[$reg, $off]" %}
6542   interface(MEMORY_INTER) %{
6543     base($reg);
6544     index(0xffffffff);
6545     scale(0x0);
6546     disp($off);
6547   %}
6548 %}
6549 
6550 operand indOffL(iRegP reg, immLoffset off)
6551 %{
6552   constraint(ALLOC_IN_RC(ptr_reg));
6553   match(AddP reg off);
6554   op_cost(0);
6555   format %{ "[$reg, $off]" %}
6556   interface(MEMORY_INTER) %{
6557     base($reg);
6558     index(0xffffffff);
6559     scale(0x0);
6560     disp($off);
6561   %}
6562 %}
6563 
6564 operand indOffL4(iRegP reg, immLoffset4 off)
6565 %{
6566   constraint(ALLOC_IN_RC(ptr_reg));
6567   match(AddP reg off);
6568   op_cost(0);
6569   format %{ "[$reg, $off]" %}
6570   interface(MEMORY_INTER) %{
6571     base($reg);
6572     index(0xffffffff);
6573     scale(0x0);
6574     disp($off);
6575   %}
6576 %}
6577 
6578 operand indOffL8(iRegP reg, immLoffset8 off)
6579 %{
6580   constraint(ALLOC_IN_RC(ptr_reg));
6581   match(AddP reg off);
6582   op_cost(0);
6583   format %{ "[$reg, $off]" %}
6584   interface(MEMORY_INTER) %{
6585     base($reg);
6586     index(0xffffffff);
6587     scale(0x0);
6588     disp($off);
6589   %}
6590 %}
6591 
6592 operand indOffL16(iRegP reg, immLoffset16 off)
6593 %{
6594   constraint(ALLOC_IN_RC(ptr_reg));
6595   match(AddP reg off);
6596   op_cost(0);
6597   format %{ "[$reg, $off]" %}
6598   interface(MEMORY_INTER) %{
6599     base($reg);
6600     index(0xffffffff);
6601     scale(0x0);
6602     disp($off);
6603   %}
6604 %}
6605 
6606 operand indirectN(iRegN reg)
6607 %{
6608   predicate(Universe::narrow_oop_shift() == 0);
6609   constraint(ALLOC_IN_RC(ptr_reg));
6610   match(DecodeN reg);
6611   op_cost(0);
6612   format %{ "[$reg]\t# narrow" %}
6613   interface(MEMORY_INTER) %{
6614     base($reg);
6615     index(0xffffffff);
6616     scale(0x0);
6617     disp(0x0);
6618   %}
6619 %}
6620 
6621 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
6622 %{
6623   predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6624   constraint(ALLOC_IN_RC(ptr_reg));
6625   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
6626   op_cost(0);
6627   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
6628   interface(MEMORY_INTER) %{
6629     base($reg);
6630     index($ireg);
6631     scale($scale);
6632     disp(0x0);
6633   %}
6634 %}
6635 
6636 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
6637 %{
6638   predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6639   constraint(ALLOC_IN_RC(ptr_reg));
6640   match(AddP (DecodeN reg) (LShiftL lreg scale));
6641   op_cost(0);
6642   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
6643   interface(MEMORY_INTER) %{
6644     base($reg);
6645     index($lreg);
6646     scale($scale);
6647     disp(0x0);
6648   %}
6649 %}
6650 
6651 operand indIndexI2LN(iRegN reg, iRegI ireg)
6652 %{
6653   predicate(Universe::narrow_oop_shift() == 0);
6654   constraint(ALLOC_IN_RC(ptr_reg));
6655   match(AddP (DecodeN reg) (ConvI2L ireg));
6656   op_cost(0);
6657   format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
6658   interface(MEMORY_INTER) %{
6659     base($reg);
6660     index($ireg);
6661     scale(0x0);
6662     disp(0x0);
6663   %}
6664 %}
6665 
6666 operand indIndexN(iRegN reg, iRegL lreg)
6667 %{
6668   predicate(Universe::narrow_oop_shift() == 0);
6669   constraint(ALLOC_IN_RC(ptr_reg));
6670   match(AddP (DecodeN reg) lreg);
6671   op_cost(0);
6672   format %{ "$reg, $lreg\t# narrow" %}
6673   interface(MEMORY_INTER) %{
6674     base($reg);
6675     index($lreg);
6676     scale(0x0);
6677     disp(0x0);
6678   %}
6679 %}
6680 
6681 operand indOffIN(iRegN reg, immIOffset off)
6682 %{
6683   predicate(Universe::narrow_oop_shift() == 0);
6684   constraint(ALLOC_IN_RC(ptr_reg));
6685   match(AddP (DecodeN reg) off);
6686   op_cost(0);
6687   format %{ "[$reg, $off]\t# narrow" %}
6688   interface(MEMORY_INTER) %{
6689     base($reg);
6690     index(0xffffffff);
6691     scale(0x0);
6692     disp($off);
6693   %}
6694 %}
6695 
6696 operand indOffLN(iRegN reg, immLoffset off)
6697 %{
6698   predicate(Universe::narrow_oop_shift() == 0);
6699   constraint(ALLOC_IN_RC(ptr_reg));
6700   match(AddP (DecodeN reg) off);
6701   op_cost(0);
6702   format %{ "[$reg, $off]\t# narrow" %}
6703   interface(MEMORY_INTER) %{
6704     base($reg);
6705     index(0xffffffff);
6706     scale(0x0);
6707     disp($off);
6708   %}
6709 %}
6710 
6711 
6712 
6713 // AArch64 opto stubs need to write to the pc slot in the thread anchor
6714 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
6715 %{
6716   constraint(ALLOC_IN_RC(ptr_reg));
6717   match(AddP reg off);
6718   op_cost(0);
6719   format %{ "[$reg, $off]" %}
6720   interface(MEMORY_INTER) %{
6721     base($reg);
6722     index(0xffffffff);
6723     scale(0x0);
6724     disp($off);
6725   %}
6726 %}
6727 
6728 //----------Special Memory Operands--------------------------------------------
6729 // Stack Slot Operand - This operand is used for loading and storing temporary
6730 //                      values on the stack where a match requires a value to
6731 //                      flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP  (NOTE(review): x86-style name; encoding presumably selects the AArch64 stack pointer -- verify against reg_def order)
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6746 
6747 operand stackSlotI(sRegI reg)
6748 %{
6749   constraint(ALLOC_IN_RC(stack_slots));
6750   // No match rule because this operand is only generated in matching
6751   // match(RegI);
6752   format %{ "[$reg]" %}
6753   interface(MEMORY_INTER) %{
6754     base(0x1e);  // RSP
6755     index(0x0);  // No Index
6756     scale(0x0);  // No Scale
6757     disp($reg);  // Stack Offset
6758   %}
6759 %}
6760 
6761 operand stackSlotF(sRegF reg)
6762 %{
6763   constraint(ALLOC_IN_RC(stack_slots));
6764   // No match rule because this operand is only generated in matching
6765   // match(RegF);
6766   format %{ "[$reg]" %}
6767   interface(MEMORY_INTER) %{
6768     base(0x1e);  // RSP
6769     index(0x0);  // No Index
6770     scale(0x0);  // No Scale
6771     disp($reg);  // Stack Offset
6772   %}
6773 %}
6774 
6775 operand stackSlotD(sRegD reg)
6776 %{
6777   constraint(ALLOC_IN_RC(stack_slots));
6778   // No match rule because this operand is only generated in matching
6779   // match(RegD);
6780   format %{ "[$reg]" %}
6781   interface(MEMORY_INTER) %{
6782     base(0x1e);  // RSP
6783     index(0x0);  // No Index
6784     scale(0x0);  // No Scale
6785     disp($reg);  // Stack Offset
6786   %}
6787 %}
6788 
6789 operand stackSlotL(sRegL reg)
6790 %{
6791   constraint(ALLOC_IN_RC(stack_slots));
6792   // No match rule because this operand is only generated in matching
6793   // match(RegL);
6794   format %{ "[$reg]" %}
6795   interface(MEMORY_INTER) %{
6796     base(0x1e);  // RSP
6797     index(0x0);  // No Index
6798     scale(0x0);  // No Scale
6799     disp($reg);  // Stack Offset
6800   %}
6801 %}
6802 
6803 // Operands for expressing Control Flow
6804 // NOTE: Label is a predefined operand which should not be redefined in
6805 //       the AD file. It is generically handled within the ADLC.
6806 
6807 //----------Conditional Branch Operands----------------------------------------
6808 // Comparison Op  - This is the operation of the comparison, and is limited to
6809 //                  the following set of codes:
6810 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6811 //
6812 // Other attributes of the comparison, such as unsignedness, are specified
6813 // by the comparison instruction that sets a condition code flags register.
6814 // That result is represented by a flags operand whose subtype is appropriate
6815 // to the unsignedness (etc.) of the comparison.
6816 //
6817 // Later, the instruction which matches both the Comparison Op (a Bool) and
6818 // the flags (produced by the Cmp) specifies the coding of the comparison op
6819 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6820 
6821 // used for signed integral comparisons and fp comparisons
6822 
6823 operand cmpOp()
6824 %{
6825   match(Bool);
6826 
6827   format %{ "" %}
6828   interface(COND_INTER) %{
6829     equal(0x0, "eq");
6830     not_equal(0x1, "ne");
6831     less(0xb, "lt");
6832     greater_equal(0xa, "ge");
6833     less_equal(0xd, "le");
6834     greater(0xc, "gt");
6835     overflow(0x6, "vs");
6836     no_overflow(0x7, "vc");
6837   %}
6838 %}
6839 
6840 // used for unsigned integral comparisons
6841 
6842 operand cmpOpU()
6843 %{
6844   match(Bool);
6845 
6846   format %{ "" %}
6847   interface(COND_INTER) %{
6848     equal(0x0, "eq");
6849     not_equal(0x1, "ne");
6850     less(0x3, "lo");
6851     greater_equal(0x2, "hs");
6852     less_equal(0x9, "ls");
6853     greater(0x8, "hi");
6854     overflow(0x6, "vs");
6855     no_overflow(0x7, "vc");
6856   %}
6857 %}
6858 
6859 // used for certain integral comparisons which can be
6860 // converted to cbxx or tbxx instructions
6861 
6862 operand cmpOpEqNe()
6863 %{
6864   match(Bool);
6865   match(CmpOp);
6866   op_cost(0);
6867   predicate(n->as_Bool()->_test._test == BoolTest::ne
6868             || n->as_Bool()->_test._test == BoolTest::eq);
6869 
6870   format %{ "" %}
6871   interface(COND_INTER) %{
6872     equal(0x0, "eq");
6873     not_equal(0x1, "ne");
6874     less(0xb, "lt");
6875     greater_equal(0xa, "ge");
6876     less_equal(0xd, "le");
6877     greater(0xc, "gt");
6878     overflow(0x6, "vs");
6879     no_overflow(0x7, "vc");
6880   %}
6881 %}
6882 
6883 // used for certain integral comparisons which can be
6884 // converted to cbxx or tbxx instructions
6885 
6886 operand cmpOpLtGe()
6887 %{
6888   match(Bool);
6889   match(CmpOp);
6890   op_cost(0);
6891 
6892   predicate(n->as_Bool()->_test._test == BoolTest::lt
6893             || n->as_Bool()->_test._test == BoolTest::ge);
6894 
6895   format %{ "" %}
6896   interface(COND_INTER) %{
6897     equal(0x0, "eq");
6898     not_equal(0x1, "ne");
6899     less(0xb, "lt");
6900     greater_equal(0xa, "ge");
6901     less_equal(0xd, "le");
6902     greater(0xc, "gt");
6903     overflow(0x6, "vs");
6904     no_overflow(0x7, "vc");
6905   %}
6906 %}
6907 
6908 // used for certain unsigned integral comparisons which can be
6909 // converted to cbxx or tbxx instructions
6910 
6911 operand cmpOpUEqNeLtGe()
6912 %{
6913   match(Bool);
6914   match(CmpOp);
6915   op_cost(0);
6916 
6917   predicate(n->as_Bool()->_test._test == BoolTest::eq
6918             || n->as_Bool()->_test._test == BoolTest::ne
6919             || n->as_Bool()->_test._test == BoolTest::lt
6920             || n->as_Bool()->_test._test == BoolTest::ge);
6921 
6922   format %{ "" %}
6923   interface(COND_INTER) %{
6924     equal(0x0, "eq");
6925     not_equal(0x1, "ne");
6926     less(0xb, "lt");
6927     greater_equal(0xa, "ge");
6928     less_equal(0xd, "le");
6929     greater(0xc, "gt");
6930     overflow(0x6, "vs");
6931     no_overflow(0x7, "vc");
6932   %}
6933 %}
6934 
6935 // Special operand allowing long args to int ops to be truncated for free
6936 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  // matching the ConvL2I directly lets the truncation be elided: the
  // consuming 32-bit instruction just reads the low word of the long
  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // terminated with ';' for consistency with every other REG_INTER operand
  interface(REG_INTER);
%}
6947 
6948 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
6949 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
6950 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6951 
6952 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6954 // instruction definitions by not requiring the AD writer to specify
6955 // separate instructions for every form of operand when the
6956 // instruction accepts multiple operand types with the same basic
6957 // encoding and format. The classic case of this is memory operands.
6958 
6959 // memory is used to define read/write location for load/store
6960 // instruction defs. we can turn a memory op into an Address
6961 
6962 opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
6963                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
6964 
6965 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6966 // operations. it allows the src to be either an iRegI or a (ConvL2I
6967 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6968 // can be elided because the 32-bit instruction will just employ the
6969 // lower 32 bits anyway.
6970 //
6971 // n.b. this does not elide all L2I conversions. if the truncated
6972 // value is consumed by more than one operation then the ConvL2I
6973 // cannot be bundled into the consuming nodes so an l2i gets planted
6974 // (actually a movw $dst $src) and the downstream instructions consume
6975 // the result of the l2i as an iRegI input. That's a shame since the
6976 // movw is actually redundant but its not too costly.
6977 
6978 opclass iRegIorL2I(iRegI, iRegL2I);
6979 
6980 //----------PIPELINE-----------------------------------------------------------
6981 // Rules which define the behavior of the target architectures pipeline.
6982 
6983 // For specific pipelines, eg A53, define the stages of that pipeline
6984 //pipe_desc(ISS, EX1, EX2, WR);
6985 #define ISS S0
6986 #define EX1 S1
6987 #define EX2 S2
6988 #define WR  S3
6989 
6990 // Integer ALU reg operation
6991 pipeline %{
6992 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions (all AArch64 instructions are 32 bits wide)
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
7005 
7006 // We don't use an actual pipeline model so don't care about resources
7007 // or description. we do use pipeline classes to introduce fixed
7008 // latencies
7009 
7010 //----------RESOURCES----------------------------------------------------------
7011 // Resources are the functional units available to the machine
7012 
7013 resources( INS0, INS1, INS01 = INS0 | INS1,
7014            ALU0, ALU1, ALU = ALU0 | ALU1,
7015            MAC,
7016            DIV,
7017            BRANCH,
7018            LDST,
7019            NEON_FP);
7020 
7021 //----------PIPELINE DESCRIPTION-----------------------------------------------
7022 // Pipeline Description specifies the stages in the machine's pipeline
7023 
7024 // Define the pipeline as a generic 6 stage pipeline
7025 pipe_desc(S0, S1, S2, S3, S4, S5);
7026 
7027 //----------PIPELINE CLASSES---------------------------------------------------
7028 // Pipeline Classes describe the stages in which input and output are
7029 // referenced by the hardware pipeline.
7030 
7031 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
7032 %{
7033   single_instruction;
7034   src1   : S1(read);
7035   src2   : S2(read);
7036   dst    : S5(write);
7037   INS01  : ISS;
7038   NEON_FP : S5;
7039 %}
7040 
7041 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
7042 %{
7043   single_instruction;
7044   src1   : S1(read);
7045   src2   : S2(read);
7046   dst    : S5(write);
7047   INS01  : ISS;
7048   NEON_FP : S5;
7049 %}
7050 
7051 pipe_class fp_uop_s(vRegF dst, vRegF src)
7052 %{
7053   single_instruction;
7054   src    : S1(read);
7055   dst    : S5(write);
7056   INS01  : ISS;
7057   NEON_FP : S5;
7058 %}
7059 
7060 pipe_class fp_uop_d(vRegD dst, vRegD src)
7061 %{
7062   single_instruction;
7063   src    : S1(read);
7064   dst    : S5(write);
7065   INS01  : ISS;
7066   NEON_FP : S5;
7067 %}
7068 
7069 pipe_class fp_d2f(vRegF dst, vRegD src)
7070 %{
7071   single_instruction;
7072   src    : S1(read);
7073   dst    : S5(write);
7074   INS01  : ISS;
7075   NEON_FP : S5;
7076 %}
7077 
7078 pipe_class fp_f2d(vRegD dst, vRegF src)
7079 %{
7080   single_instruction;
7081   src    : S1(read);
7082   dst    : S5(write);
7083   INS01  : ISS;
7084   NEON_FP : S5;
7085 %}
7086 
7087 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
7088 %{
7089   single_instruction;
7090   src    : S1(read);
7091   dst    : S5(write);
7092   INS01  : ISS;
7093   NEON_FP : S5;
7094 %}
7095 
7096 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
7097 %{
7098   single_instruction;
7099   src    : S1(read);
7100   dst    : S5(write);
7101   INS01  : ISS;
7102   NEON_FP : S5;
7103 %}
7104 
7105 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
7106 %{
7107   single_instruction;
7108   src    : S1(read);
7109   dst    : S5(write);
7110   INS01  : ISS;
7111   NEON_FP : S5;
7112 %}
7113 
7114 pipe_class fp_l2f(vRegF dst, iRegL src)
7115 %{
7116   single_instruction;
7117   src    : S1(read);
7118   dst    : S5(write);
7119   INS01  : ISS;
7120   NEON_FP : S5;
7121 %}
7122 
7123 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
7124 %{
7125   single_instruction;
7126   src    : S1(read);
7127   dst    : S5(write);
7128   INS01  : ISS;
7129   NEON_FP : S5;
7130 %}
7131 
7132 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
7133 %{
7134   single_instruction;
7135   src    : S1(read);
7136   dst    : S5(write);
7137   INS01  : ISS;
7138   NEON_FP : S5;
7139 %}
7140 
7141 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
7142 %{
7143   single_instruction;
7144   src    : S1(read);
7145   dst    : S5(write);
7146   INS01  : ISS;
7147   NEON_FP : S5;
7148 %}
7149 
7150 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
7151 %{
7152   single_instruction;
7153   src    : S1(read);
7154   dst    : S5(write);
7155   INS01  : ISS;
7156   NEON_FP : S5;
7157 %}
7158 
7159 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
7160 %{
7161   single_instruction;
7162   src1   : S1(read);
7163   src2   : S2(read);
7164   dst    : S5(write);
7165   INS0   : ISS;
7166   NEON_FP : S5;
7167 %}
7168 
7169 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
7170 %{
7171   single_instruction;
7172   src1   : S1(read);
7173   src2   : S2(read);
7174   dst    : S5(write);
7175   INS0   : ISS;
7176   NEON_FP : S5;
7177 %}
7178 
7179 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
7180 %{
7181   single_instruction;
7182   cr     : S1(read);
7183   src1   : S1(read);
7184   src2   : S1(read);
7185   dst    : S3(write);
7186   INS01  : ISS;
7187   NEON_FP : S3;
7188 %}
7189 
7190 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
7191 %{
7192   single_instruction;
7193   cr     : S1(read);
7194   src1   : S1(read);
7195   src2   : S1(read);
7196   dst    : S3(write);
7197   INS01  : ISS;
7198   NEON_FP : S3;
7199 %}
7200 
7201 pipe_class fp_imm_s(vRegF dst)
7202 %{
7203   single_instruction;
7204   dst    : S3(write);
7205   INS01  : ISS;
7206   NEON_FP : S3;
7207 %}
7208 
7209 pipe_class fp_imm_d(vRegD dst)
7210 %{
7211   single_instruction;
7212   dst    : S3(write);
7213   INS01  : ISS;
7214   NEON_FP : S3;
7215 %}
7216 
7217 pipe_class fp_load_constant_s(vRegF dst)
7218 %{
7219   single_instruction;
7220   dst    : S4(write);
7221   INS01  : ISS;
7222   NEON_FP : S4;
7223 %}
7224 
7225 pipe_class fp_load_constant_d(vRegD dst)
7226 %{
7227   single_instruction;
7228   dst    : S4(write);
7229   INS01  : ISS;
7230   NEON_FP : S4;
7231 %}
7232 
7233 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
7234 %{
7235   single_instruction;
7236   dst    : S5(write);
7237   src1   : S1(read);
7238   src2   : S1(read);
7239   INS01  : ISS;
7240   NEON_FP : S5;
7241 %}
7242 
7243 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
7244 %{
7245   single_instruction;
7246   dst    : S5(write);
7247   src1   : S1(read);
7248   src2   : S1(read);
7249   INS0   : ISS;
7250   NEON_FP : S5;
7251 %}
7252 
7253 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
7254 %{
7255   single_instruction;
7256   dst    : S5(write);
7257   src1   : S1(read);
7258   src2   : S1(read);
7259   dst    : S1(read);
7260   INS01  : ISS;
7261   NEON_FP : S5;
7262 %}
7263 
7264 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
7265 %{
7266   single_instruction;
7267   dst    : S5(write);
7268   src1   : S1(read);
7269   src2   : S1(read);
7270   dst    : S1(read);
7271   INS0   : ISS;
7272   NEON_FP : S5;
7273 %}
7274 
7275 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
7276 %{
7277   single_instruction;
7278   dst    : S4(write);
7279   src1   : S2(read);
7280   src2   : S2(read);
7281   INS01  : ISS;
7282   NEON_FP : S4;
7283 %}
7284 
7285 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
7286 %{
7287   single_instruction;
7288   dst    : S4(write);
7289   src1   : S2(read);
7290   src2   : S2(read);
7291   INS0   : ISS;
7292   NEON_FP : S4;
7293 %}
7294 
7295 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
7296 %{
7297   single_instruction;
7298   dst    : S3(write);
7299   src1   : S2(read);
7300   src2   : S2(read);
7301   INS01  : ISS;
7302   NEON_FP : S3;
7303 %}
7304 
7305 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
7306 %{
7307   single_instruction;
7308   dst    : S3(write);
7309   src1   : S2(read);
7310   src2   : S2(read);
7311   INS0   : ISS;
7312   NEON_FP : S3;
7313 %}
7314 
7315 pipe_class vshift64(vecD dst, vecD src, vecX shift)
7316 %{
7317   single_instruction;
7318   dst    : S3(write);
7319   src    : S1(read);
7320   shift  : S1(read);
7321   INS01  : ISS;
7322   NEON_FP : S3;
7323 %}
7324 
7325 pipe_class vshift128(vecX dst, vecX src, vecX shift)
7326 %{
7327   single_instruction;
7328   dst    : S3(write);
7329   src    : S1(read);
7330   shift  : S1(read);
7331   INS0   : ISS;
7332   NEON_FP : S3;
7333 %}
7334 
7335 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
7336 %{
7337   single_instruction;
7338   dst    : S3(write);
7339   src    : S1(read);
7340   INS01  : ISS;
7341   NEON_FP : S3;
7342 %}
7343 
7344 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
7345 %{
7346   single_instruction;
7347   dst    : S3(write);
7348   src    : S1(read);
7349   INS0   : ISS;
7350   NEON_FP : S3;
7351 %}
7352 
// FP vector dyadic operation, 64-bit vector; result available in stage S5.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP vector dyadic operation, 128-bit vector; issues only as instruction 0.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP vector multiply/divide, 64-bit vector.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP vector multiply/divide, 128-bit vector.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP vector square root, 128-bit vector.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP vector unary operation, 64-bit vector.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP vector unary operation, 128-bit vector; issues only as instruction 0.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7419 
// Duplicate a general register into all lanes, 64-bit vector result.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes, 128-bit vector result.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit vector result.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit vector result.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into both lanes, 128-bit vector result.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit vector (no register sources).
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit vector; issues only as instruction 0.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7480 
// Vector load, 64-bit vector; address consumed at issue (ISS).
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit vector.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit vector; data register read in S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7507 
// Vector store, 128-bit vector; data register read in S2.
// Fix: source operand is a 128-bit vector, so it is declared vecX — the
// original said vecD, a copy-paste from the 64-bit variant above.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7516 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): ALU resource listed in EX1 although dst is written in EX2 — confirm intended
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7614 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (single register source plus flags)
// EG.  CNEG    X0, X1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7679 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7758 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-offset addressing)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-offset addressing; "dst" is the offset register)
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7826 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads flags)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7855 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7919 
7920 %}
7921 //----------INSTRUCTIONS-------------------------------------------------------
7922 //
7923 // match      -- States which machine-independent subtree may be replaced
7924 //               by this instruction.
7925 // ins_cost   -- The estimated cost of this instruction is used by instruction
7926 //               selection to identify a minimum cost tree of machine
7927 //               instructions that matches a tree of machine-independent
7928 //               instructions.
7929 // format     -- A string providing the disassembly for this instruction.
7930 //               The value of an instruction's operand may be inserted
7931 //               by referring to it with a '$' prefix.
7932 // opcode     -- Three instruction opcodes may be provided.  These are referred
7933 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7935 //               indicate the type of machine instruction, while secondary
7936 //               and tertiary are often used for prefix options or addressing
7937 //               modes.
7938 // ins_encode -- A list of encode classes with parameters. The encode class
7939 //               name must have been defined in an 'enc_class' specification
7940 //               in the encode section of the architecture description.
7941 
7942 // ============================================================================
7943 // Memory (Load/Store) Instructions
7944 
7945 // Load Instructions
7946 
// Load Byte (8 bit signed)
// Plain (non-acquiring) load; the predicate excludes volatile accesses,
// which are matched by the ldar* rules in the volatile section below.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// Matches the fused ConvI2L(LoadB) pattern; n->in(1) is the LoadB node.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
// Zero-extending byte load already clears the upper bits, so the plain
// ldrb encoding is reused for the long result.
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8058 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// Fused ConvI2L(LoadI): a single sign-extending ldrsw.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches AndL(ConvI2L(LoadI), 0xFFFFFFFF): the 32-bit ldrw zero-extends,
// making the explicit mask redundant.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8100 
// Load Long (64 bit signed)
// Plain (non-acquiring) 64-bit load.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fix: disassembly annotation said "# int" although this is a 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8114 
// Load Range (array length; no acquiring variant needed — lengths are immutable)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8211 
8212 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// May expand to up to four mov/movk instructions, hence the higher cost.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8268 
// Load Pointer Constant One
// Fix: the format comment said "# NULL ptr" — a copy-paste from loadConP0
// above; this rule materializes the constant-one pointer.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8282 
// Load Poll Page Constant
// Materialized with a pc-relative adr rather than a mov immediate.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card table base for GC barriers)
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8352 
// Load Packed Float Constant
// "Packed" constants are encodable directly in the fmov immediate field,
// avoiding a constant-table load.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case: load from the constant table)
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant (immediate-encodable fmov)
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8396 
// Load Double Constant (general case: load from the constant table)
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fix: the annotation said "float=$con" — a copy-paste from loadConF;
  // this rule loads a double.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8413 
// Store Instructions
// Plain stores match only when no release semantics are needed
// (needs_releasing_store is false); volatile variants use stlr encodings.

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8458 
8459 
// Store Byte Immediate Zero
// Fix: the format comment said "strb rscractch2" (misspelled and wrong —
// the aarch64_enc_strb0 encoding stores zr, as in storeimmCM0 above).
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8472 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short Immediate Zero (stores zr directly)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer Immediate Zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8527 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fix: disassembly annotation said "# int" although this is a 64-bit store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8541 
// Store Long Immediate Zero (stores zr directly)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fix: disassembly annotation said "# int" although this is a 64-bit store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8555 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer (stores zr directly)
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer
// When both compression bases are NULL, rheapbase holds zero, so it can be
// stored directly instead of materializing a zero.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8612 
8613 // Store Float
8614 instruct storeF(vRegF src, memory mem)
8615 %{
8616   match(Set mem (StoreF mem src));
8617   predicate(!needs_releasing_store(n));
8618 
8619   ins_cost(INSN_COST);
8620   format %{ "strs  $src, $mem\t# float" %}
8621 
8622   ins_encode( aarch64_enc_strs(src, mem) );
8623 
8624   ins_pipe(pipe_class_memory);
8625 %}
8626 
8627 // TODO
8628 // implement storeImmF0 and storeFImmPacked
8629 
8630 // Store Double
8631 instruct storeD(vRegD src, memory mem)
8632 %{
8633   match(Set mem (StoreD mem src));
8634   predicate(!needs_releasing_store(n));
8635 
8636   ins_cost(INSN_COST);
8637   format %{ "strd  $src, $mem\t# double" %}
8638 
8639   ins_encode( aarch64_enc_strd(src, mem) );
8640 
8641   ins_pipe(pipe_class_memory);
8642 %}
8643 
// Store Compressed Klass Pointer
// Plain strw of a narrow klass pointer.
// Clause order normalized (match before predicate) to be consistent with
// every other plain-store rule in this section; ADL semantics are unchanged.
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8657 
// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Allocation prefetch: emits a prfm with the PSTL1KEEP hint
// (prepare-for-store, L1 cache, keep) per the format string below.
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8674 
//  ---------------- volatile loads and stores ----------------
//
// These rules use the acquire-ordered load forms (ldar*).  The memory
// operand is a bare 'indirect' register (no offset/index addressing);
// each rule goes through pipe_serial.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8766 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: format previously printed "ldarh" (unsigned form) although the
  // encoding emits the sign-extending aarch64_enc_ldarsh below.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8779 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask is folded away: ldarw zero-extends its
// 32-bit result into the 64-bit destination register.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8805 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the format comment previously said "# int" for a 64-bit load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8818 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// FP variants: the acquire load goes through the fldars/fldard encodings
// (see aarch64_enc_fldars / aarch64_enc_fldard).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8870 
// Store Byte
// Volatile stores use the release-ordered stlr* forms.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8896 
// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Whitespace normalized: "Set mem (StoreI ...)" previously read
  // "Set mem(StoreI ...)", unlike every sibling rule in this section.
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8910 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the format comment previously said "# int" for a 64-bit store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8923 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// FP variants go through the fstlrs/fstlrd encodings.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8978 
//  ---------------- end of volatile loads and stores ----------------

// ============================================================================
// BSWAP Instructions

// Reverse the bytes of a 32-bit value (revw).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a 64-bit value (rev).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse an unsigned short: rev16w swaps the bytes within each
// 16-bit halfword.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a signed short: rev16w swaps the bytes, then sbfmw with
// bit range 0..15 sign-extends the low 16 bits into the full register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
9037 
9038 // ============================================================================
9039 // Zero Count Instructions
9040 
9041 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
9042   match(Set dst (CountLeadingZerosI src));
9043 
9044   ins_cost(INSN_COST);
9045   format %{ "clzw  $dst, $src" %}
9046   ins_encode %{
9047     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
9048   %}
9049 
9050   ins_pipe(ialu_reg);
9051 %}
9052 
9053 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
9054   match(Set dst (CountLeadingZerosL src));
9055 
9056   ins_cost(INSN_COST);
9057   format %{ "clz   $dst, $src" %}
9058   ins_encode %{
9059     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
9060   %}
9061 
9062   ins_pipe(ialu_reg);
9063 %}
9064 
9065 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
9066   match(Set dst (CountTrailingZerosI src));
9067 
9068   ins_cost(INSN_COST * 2);
9069   format %{ "rbitw  $dst, $src\n\t"
9070             "clzw   $dst, $dst" %}
9071   ins_encode %{
9072     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
9073     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
9074   %}
9075 
9076   ins_pipe(ialu_reg);
9077 %}
9078 
9079 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
9080   match(Set dst (CountTrailingZerosL src));
9081 
9082   ins_cost(INSN_COST * 2);
9083   format %{ "rbit   $dst, $src\n\t"
9084             "clz    $dst, $dst" %}
9085   ins_encode %{
9086     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
9087     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
9088   %}
9089 
9090   ins_pipe(ialu_reg);
9091 %}
9092 
//---------- Population Count Instructions -------------------------------------
//
// Bit counting is done in the SIMD unit: the value is moved into a vector
// register, cnt counts the set bits per byte (8B arrangement), addv sums
// the byte counts across the vector, and the scalar result is moved back
// to a general register.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  // NOTE(review): the leading movw writes $src in place to clear its upper
  // 32 bits; src holds an int so only the unused high half changes -
  // confirm no effect() declaration is needed for this.
  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI with the operand loaded straight from memory into the vector
// register (ldrs), bypassing the general-register file.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountL with the operand loaded straight from memory (ldrd).
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9182 
9183 // ============================================================================
9184 // MemBar Instruction
9185 
9186 instruct load_fence() %{
9187   match(LoadFence);
9188   ins_cost(VOLATILE_REF_COST);
9189 
9190   format %{ "load_fence" %}
9191 
9192   ins_encode %{
9193     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
9194   %}
9195   ins_pipe(pipe_serial);
9196 %}
9197 
9198 instruct unnecessary_membar_acquire() %{
9199   predicate(unnecessary_acquire(n));
9200   match(MemBarAcquire);
9201   ins_cost(0);
9202 
9203   format %{ "membar_acquire (elided)" %}
9204 
9205   ins_encode %{
9206     __ block_comment("membar_acquire (elided)");
9207   %}
9208 
9209   ins_pipe(pipe_class_empty);
9210 %}
9211 
9212 instruct membar_acquire() %{
9213   match(MemBarAcquire);
9214   ins_cost(VOLATILE_REF_COST);
9215 
9216   format %{ "membar_acquire" %}
9217 
9218   ins_encode %{
9219     __ block_comment("membar_acquire");
9220     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
9221   %}
9222 
9223   ins_pipe(pipe_serial);
9224 %}
9225 
9226 
9227 instruct membar_acquire_lock() %{
9228   match(MemBarAcquireLock);
9229   ins_cost(VOLATILE_REF_COST);
9230 
9231   format %{ "membar_acquire_lock (elided)" %}
9232 
9233   ins_encode %{
9234     __ block_comment("membar_acquire_lock (elided)");
9235   %}
9236 
9237   ins_pipe(pipe_serial);
9238 %}
9239 
9240 instruct store_fence() %{
9241   match(StoreFence);
9242   ins_cost(VOLATILE_REF_COST);
9243 
9244   format %{ "store_fence" %}
9245 
9246   ins_encode %{
9247     __ membar(Assembler::LoadStore|Assembler::StoreStore);
9248   %}
9249   ins_pipe(pipe_serial);
9250 %}
9251 
9252 instruct unnecessary_membar_release() %{
9253   predicate(unnecessary_release(n));
9254   match(MemBarRelease);
9255   ins_cost(0);
9256 
9257   format %{ "membar_release (elided)" %}
9258 
9259   ins_encode %{
9260     __ block_comment("membar_release (elided)");
9261   %}
9262   ins_pipe(pipe_serial);
9263 %}
9264 
9265 instruct membar_release() %{
9266   match(MemBarRelease);
9267   ins_cost(VOLATILE_REF_COST);
9268 
9269   format %{ "membar_release" %}
9270 
9271   ins_encode %{
9272     __ block_comment("membar_release");
9273     __ membar(Assembler::LoadStore|Assembler::StoreStore);
9274   %}
9275   ins_pipe(pipe_serial);
9276 %}
9277 
9278 instruct membar_storestore() %{
9279   match(MemBarStoreStore);
9280   ins_cost(VOLATILE_REF_COST);
9281 
9282   format %{ "MEMBAR-store-store" %}
9283 
9284   ins_encode %{
9285     __ membar(Assembler::StoreStore);
9286   %}
9287   ins_pipe(pipe_serial);
9288 %}
9289 
9290 instruct membar_release_lock() %{
9291   match(MemBarReleaseLock);
9292   ins_cost(VOLATILE_REF_COST);
9293 
9294   format %{ "membar_release_lock (elided)" %}
9295 
9296   ins_encode %{
9297     __ block_comment("membar_release_lock (elided)");
9298   %}
9299 
9300   ins_pipe(pipe_serial);
9301 %}
9302 
9303 instruct unnecessary_membar_volatile() %{
9304   predicate(unnecessary_volatile(n));
9305   match(MemBarVolatile);
9306   ins_cost(0);
9307 
9308   format %{ "membar_volatile (elided)" %}
9309 
9310   ins_encode %{
9311     __ block_comment("membar_volatile (elided)");
9312   %}
9313 
9314   ins_pipe(pipe_serial);
9315 %}
9316 
9317 instruct membar_volatile() %{
9318   match(MemBarVolatile);
9319   ins_cost(VOLATILE_REF_COST*100);
9320 
9321   format %{ "membar_volatile" %}
9322 
9323   ins_encode %{
9324     __ block_comment("membar_volatile");
9325     __ membar(Assembler::StoreLoad);
9326   %}
9327 
9328   ins_pipe(pipe_serial);
9329 %}
9330 
9331 // ============================================================================
9332 // Cast/Convert Instructions
9333 
9334 instruct castX2P(iRegPNoSp dst, iRegL src) %{
9335   match(Set dst (CastX2P src));
9336 
9337   ins_cost(INSN_COST);
9338   format %{ "mov $dst, $src\t# long -> ptr" %}
9339 
9340   ins_encode %{
9341     if ($dst$$reg != $src$$reg) {
9342       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
9343     }
9344   %}
9345 
9346   ins_pipe(ialu_reg);
9347 %}
9348 
9349 instruct castP2X(iRegLNoSp dst, iRegP src) %{
9350   match(Set dst (CastP2X src));
9351 
9352   ins_cost(INSN_COST);
9353   format %{ "mov $dst, $src\t# ptr -> long" %}
9354 
9355   ins_encode %{
9356     if ($dst$$reg != $src$$reg) {
9357       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
9358     }
9359   %}
9360 
9361   ins_pipe(ialu_reg);
9362 %}
9363 
9364 // Convert oop into int for vectors alignment masking
9365 instruct convP2I(iRegINoSp dst, iRegP src) %{
9366   match(Set dst (ConvL2I (CastP2X src)));
9367 
9368   ins_cost(INSN_COST);
9369   format %{ "movw $dst, $src\t# ptr -> int" %}
9370   ins_encode %{
9371     __ movw($dst$$Register, $src$$Register);
9372   %}
9373 
9374   ins_pipe(ialu_reg);
9375 %}
9376 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: format previously read "mov dst, $src" - it was missing the '$'
  // on dst (so the literal text "dst" was printed) and named mov where the
  // encoding emits movw.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9392 
9393 
// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Encode when the type system proves the oop non-null (predicate selects
// TypePtr::NotNull).
// NOTE(review): cr is declared but there is no effect(KILL cr), unlike
// encodeHeapOop above - confirm encode_heap_oop_not_null leaves the flags
// untouched, or drop the unused cr operand.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// General decode (value may be null).
// NOTE(review): cr is declared here without an effect() clause - verify.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode when the value is provably non-null or a constant.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9447 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The macro assembler provides a one-register (in-place) form and a
    // two-register form; pick whichever matches the allocation.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9485 
// checkCastPP/castPP/castII are type-system-only nodes: dst is matched to
// itself, size(0) declares that no code is emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9516 
9517 // ============================================================================
9518 // Atomic operation instructions
9519 //
9520 // Intel and SPARC both implement Ideal Node LoadPLocked and
9521 // Store{PIL}Conditional instructions using a normal load for the
9522 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9523 //
9524 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9525 // pair to lock object allocations from Eden space when not using
9526 // TLABs.
9527 //
9528 // There does not appear to be a Load{IL}Locked Ideal Node and the
9529 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9530 // and to use StoreIConditional only for 32-bit and StoreLConditional
9531 // only for 64-bit.
9532 //
9533 // We implement LoadPLocked and StorePLocked instructions using,
9534 // respectively the AArch64 hw load-exclusive and store-conditional
9535 // instructions. Whereas we must implement each of
9536 // Store{IL}Conditional using a CAS which employs a pair of
9537 // instructions comprising a load-exclusive followed by a
9538 // store-conditional.
9539 
9540 
9541 // Locked-load (linked load) of the current heap-top
9542 // used when updating the eden heap top
9543 // implemented using ldaxr on AArch64
9544 
9545 instruct loadPLocked(iRegPNoSp dst, indirect mem)
9546 %{
9547   match(Set dst (LoadPLocked mem));
9548 
9549   ins_cost(VOLATILE_REF_COST);
9550 
9551   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
9552 
9553   ins_encode(aarch64_enc_ldaxr(dst, mem));
9554 
9555   ins_pipe(pipe_serial);
9556 %}
9557 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  // NOTE(review): the two format strings below have no "\n\t" separator,
  // so the two lines run together in disassembly/debug output - confirm
  // whether that is intentional.
  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9582 
9583 
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
// The flags (cr) are the Set result: EQ indicates the store succeeded.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9622 
// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate
//
// Each rule emits a cmpxchg sequence and then materialises the boolean
// result with cset on EQ (aarch64_enc_cset_eq).  cmpxchg clobbers the
// flags, hence effect(KILL cr).

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9700 
9701 // alternative CompareAndSwapX when we are eliding barriers
9702 
// Acquiring variant of compareAndSwapI, selected (via the predicate)
// when C2 is eliding separate memory barriers around the CAS; the
// lower ins_cost makes it win over the plain rule when applicable.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9721 
// Acquiring variant of compareAndSwapL (see compareAndSwapIAcq for the
// predicate rationale).  $res <- (EQ ? 1 : 0); KILLs the flags.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9740 
// Acquiring variant of compareAndSwapP (see compareAndSwapIAcq for the
// predicate rationale).  $res <- (EQ ? 1 : 0); KILLs the flags.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9759 
// Acquiring variant of compareAndSwapN (narrow oop; word-sized CAS).
// $res <- (EQ ? 1 : 0); KILLs the flags.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9778 
9779 
9780 // ---------------------------------------------------------------------
9781 
9782 
9783 // BEGIN This section of the file is automatically generated. Do not edit --------------
9784 
9785 // Sundry CAS operations.  Note that release is always true,
9786 // regardless of the memory ordering of the CAS.  This is because we
9787 // need the volatile case to be sequentially consistent but there is
9788 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9789 // can't check the type of memory ordering here, so we always emit a
9790 // STLXR.
9791 
9792 // This section is generated from aarch64_ad_cas.m4
9793 
9794 
9795 
// Strong (not weak: /*weak*/ false below) compare-and-exchange of a
// byte; $res receives the previous memory value, sign-extended.
// $oldval is zero-extended into rscratch2 for the sub-word compare.
// NOTE(review): generated section -- mirror this format fix in aarch64_ad_cas.m4.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9812 
// Strong (not weak: /*weak*/ false below) compare-and-exchange of a
// short; $res receives the previous memory value, sign-extended.
// NOTE(review): generated section -- mirror this format fix in aarch64_ad_cas.m4.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9829 
// Strong (not weak: /*weak*/ false below) compare-and-exchange of an
// int; $res receives the previous memory value.
// NOTE(review): generated section -- mirror this format fix in aarch64_ad_cas.m4.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9844 
// Strong (not weak: /*weak*/ false below) compare-and-exchange of a
// long; $res receives the previous memory value.
// NOTE(review): generated section -- mirror this format fix in aarch64_ad_cas.m4.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9859 
// Strong (not weak: /*weak*/ false below) compare-and-exchange of a
// narrow oop (word-sized); $res receives the previous memory value.
// NOTE(review): generated section -- mirror this format fix in aarch64_ad_cas.m4.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9874 
// Strong (not weak: /*weak*/ false below) compare-and-exchange of a
// pointer; $res receives the previous memory value.
// NOTE(review): generated section -- mirror this format fix in aarch64_ad_cas.m4.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9889 
// Weak CAS (may fail spuriously: /*weak*/ true) of a byte.
// No old value is returned (noreg); $res <- (EQ ? 1 : 0) via csetw.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    // zero-extend the compare value for the sub-word CAS
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9907 
// Weak CAS (may fail spuriously: /*weak*/ true) of a short.
// No old value is returned (noreg); $res <- (EQ ? 1 : 0) via csetw.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    // zero-extend the compare value for the sub-word CAS
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9925 
// Weak CAS (may fail spuriously: /*weak*/ true) of an int.
// No old value is returned (noreg); $res <- (EQ ? 1 : 0) via csetw.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9942 
// Weak CAS (may fail spuriously: /*weak*/ true) of a long.
// No old value is returned (noreg); $res <- (EQ ? 1 : 0) via csetw.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9959 
// Weak CAS (may fail spuriously: /*weak*/ true) of a narrow oop.
// No old value is returned (noreg); $res <- (EQ ? 1 : 0) via csetw.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9976 
// Weak CAS (may fail spuriously: /*weak*/ true) of a pointer.
// No old value is returned (noreg); $res <- (EQ ? 1 : 0) via csetw.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9993 
9994 // END This section of the file is automatically generated. Do not edit --------------
9995 // ---------------------------------------------------------------------
9996 
// Atomic exchange of an int: $prev <- old [$mem], [$mem] <- $newv.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10005 
// Atomic exchange of a long: $prev <- old [$mem], [$mem] <- $newv.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10014 
// Atomic exchange of a narrow oop (word-sized xchg).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10023 
// Atomic exchange of a pointer: $prev <- old [$mem], [$mem] <- $newv.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10032 
10033 
// Atomic fetch-and-add of a long with a register increment.
// NOTE(review): $newval receives MacroAssembler::atomic_add's result
// (the value prior to the add, per GetAndAdd semantics -- confirm).
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10043 
// Atomic add of a long when the fetched value is unused (predicate);
// passes noreg so no result register is allocated.  Slightly cheaper
// cost than get_and_addL so it is preferred when applicable.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10054 
// Atomic fetch-and-add of a long with an add/sub-encodable immediate.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10064 
// Immediate-increment variant of get_and_addL_no_res (result unused).
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10075 
// Atomic fetch-and-add of an int (word-sized atomic_addw) with a
// register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10085 
// Atomic int add when the fetched value is unused (predicate); noreg
// result, cheaper cost so it is preferred over get_and_addI.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10096 
// Atomic fetch-and-add of an int with an add/sub-encodable immediate.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10106 
// Immediate-increment variant of get_and_addI_no_res (result unused).
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10117 
10118 // Manifest a CmpL result in an integer register.
10119 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Manifest a CmpL result (-1/0/1) in an integer register:
// cmp sets the flags, csetw gives 0 (equal) or 1 (not equal), and
// cnegw negates that to -1 when src1 < src2 (signed).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10140 
// Manifest a CmpL result (-1/0/1) against an add/sub-encodable
// immediate.  A negative constant cannot be a SUBS immediate, so the
// flags are produced with ADDS of the negated value instead; csetw
// then gives 0/1 for eq/ne and cnegw flips to -1 on (signed) less.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // immLAddSub restricts con to the ADD/SUB immediate range, so
    // negating a negative con cannot overflow here.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10165 
10166 // ============================================================================
10167 // Conditional Move Instructions
10168 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10178 
// CMoveI, signed flags: $dst <- ($cmp holds ? $src2 : $src1) via cselw.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10194 
// CMoveI, unsigned flags: $dst <- ($cmp holds ? $src2 : $src1) via cselw.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10210 
10211 // special cases where one arg is zero
10212 
10213 // n.b. this is selected in preference to the rule above because it
10214 // avoids loading constant 0 into a source register
10215 
10216 // TODO
10217 // we ought only to be able to cull one of these variants as the ideal
10218 // transforms ought always to order the zero consistently (to left/right?)
10219 
// CMoveI with zero as the false value, signed flags:
// $dst <- ($cmp holds ? $src : 0), using zr instead of a loaded zero.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10235 
// CMoveI with zero as the false value, unsigned flags:
// $dst <- ($cmp holds ? $src : 0).
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10251 
// CMoveI with zero as the true value, signed flags:
// $dst <- ($cmp holds ? 0 : $src).
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10267 
// CMoveI with zero as the true value, unsigned flags:
// $dst <- ($cmp holds ? 0 : $src).
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10283 
10284 // special case for creating a boolean 0 or 1
10285 
10286 // n.b. this is selected in preference to the rule above because it
10287 // avoids loading constants 0 and 1 into a source register
10288 
// Boolean materialization, signed flags: $dst <- ($cmp holds ? 0 : 1)
// using csincw zr, zr (i.e. cset on the negated condition), avoiding
// loads of the constants 0 and 1.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10307 
// Boolean materialization, unsigned flags: $dst <- ($cmp holds ? 0 : 1)
// via csincw zr, zr (cset on the negated condition).
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10326 
// CMoveL, signed flags: $dst <- ($cmp holds ? $src2 : $src1) via csel.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10342 
// CMoveL, unsigned flags: $dst <- ($cmp holds ? $src2 : $src1) via csel.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10358 
10359 // special cases where one arg is zero
10360 
// CMoveL with zero as the true value, signed flags:
// $dst <- ($cmp holds ? 0 : $src).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10376 
// CMoveL with zero as the true value, unsigned flags:
// $dst <- ($cmp holds ? 0 : $src).
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10392 
// CMoveL with zero as the false value, signed flags:
// $dst <- ($cmp holds ? $src : 0).
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10408 
// CMoveL with zero as the false value, unsigned flags:
// $dst <- ($cmp holds ? $src : 0).
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10424 
// CMoveP, signed flags: $dst <- ($cmp holds ? $src2 : $src1) via csel.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10440 
// CMoveP, unsigned flags: $dst <- ($cmp holds ? $src2 : $src1) via csel.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10456 
10457 // special cases where one arg is zero
10458 
// CMoveP with null as the true value, signed flags:
// $dst <- ($cmp holds ? 0 : $src).
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10474 
// CMoveP with null as the true value, unsigned flags:
// $dst <- ($cmp holds ? 0 : $src).
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10490 
// CMoveP with null as the false value, signed flags:
// $dst <- ($cmp holds ? $src : 0).
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10506 
// CMoveP with null as the false value, unsigned flags:
// $dst <- ($cmp holds ? $src : 0).
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10522 
// CMoveN (compressed ptr), signed flags:
// $dst <- ($cmp holds ? $src2 : $src1) via word-sized cselw.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10538 
// CMoveN (compressed ptr), unsigned flags:
// $dst <- ($cmp holds ? $src2 : $src1) via word-sized cselw.
// (Format fixed: this is the cmpOpU/rFlagsRegU rule, so the comment
// must say "unsigned", matching cmovUN_reg_zero below.)
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10554 
10555 // special cases where one arg is zero
10556 
// Conditional move of compressed oops, signed compare, zero taken when
// the condition holds (zero was src2 in the CMoveN): cselw dst, zr, src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10572 
// As cmovN_reg_zero but for an unsigned compare (cmpOpU/rFlagsRegU).
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10588 
// Conditional move of compressed oops, signed compare, src taken when the
// condition holds and zero otherwise: cselw dst, src, zr.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10604 
// As cmovN_zero_reg but for an unsigned compare (cmpOpU/rFlagsRegU).
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10620 
// Float conditional move, signed compare: fcsels Sd, Sn, Sm, cond gives
// Sd = cond ? Sn : Sm, so src2 is taken when the condition holds.
// NOTE(review): the format string lists $src1, $src2 but the encoding
// emits src2 first — debug output shows a different operand order than
// the generated instruction; confirm intended.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10638 
// Float conditional move, unsigned compare variant of cmovF_reg.
// NOTE(review): format lists $src1, $src2 but the encoding emits src2
// first — same operand-order discrepancy as cmovF_reg; confirm intended.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10656 
// Double conditional move, signed compare: fcseld Dd, Dn, Dm, cond gives
// Dd = cond ? Dn : Dm, so src2 is taken when the condition holds.
// Fix: the format comment previously said "cmove float" for this double
// (CMoveD/fcseld) rule.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10674 
// Double conditional move, unsigned compare variant of cmovD_reg.
// Fix: the format comment previously said "cmove float" for this double
// (CMoveD/fcseld) rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10692 
10693 // ============================================================================
10694 // Arithmetic Instructions
10695 //
10696 
10697 // Integer Addition
10698 
// TODO
// These currently employ operations which do not set CR and hence are
// not flagged as killing CR. We would like to isolate the cases where
// we want to set flags from those where we don't, but we still need to
// work out how to do that.
10704 
// 32-bit integer add, register + register: addw dst, src1, src2.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10719 
// 32-bit integer add, register + add/sub-encodable immediate. Shares the
// aarch64_enc_addsubw_imm encoder with subI_reg_imm; opcode 0x0 selects
// the add form.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10733 
// 32-bit add of a long truncated to int (ConvL2I) plus immediate. The
// w-form add reads only the low 32 bits of the long register, so the
// narrowing conversion costs no extra instruction.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10747 
10748 // Pointer Addition
// Pointer add, register + 64-bit offset register: add dst, src1, src2.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10763 
// Pointer add with a 32-bit offset sign-extended in the instruction
// itself (add ..., sxtw), folding the ConvI2L into the add.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10778 
// Pointer add with a shifted long index: folds (src2 << scale) into a
// single lea using a scaled-register address form.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10793 
// Pointer add with a sign-extended, scaled int index: folds both the
// ConvI2L and the shift into one lea via the sxtw(scale) address form.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10808 
// (long)(int)src << scale in one sbfiz (signed bitfield insert in zero).
// The field width is capped at 32 via MIN since only the low 32 bits of
// an int are significant.
// NOTE(review): the format shows width "-$scale & 63" but the encoding
// caps it at 32 — the debug string can overstate the width; confirm.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10823 
10824 // Pointer Immediate Addition
10825 // n.b. this needs to be more expensive than using an indirect memory
10826 // operand
// Pointer add of an add/sub-encodable immediate. Shares the
// aarch64_enc_addsub_imm encoder; opcode 0x0 selects the add form.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10840 
10841 // Long Addition
// 64-bit long add, register + register: add dst, src1, src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10857 
// Long Immediate Addition. No constant pool entries required.
// 64-bit long add of an add/sub-encodable immediate; opcode 0x0 selects
// the add form of the shared add/sub encoder.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10872 
10873 // Integer Subtraction
// 32-bit integer subtract, register - register: subw dst, src1, src2.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10888 
10889 // Immediate Subtraction
// 32-bit integer subtract of an add/sub-encodable immediate; opcode 0x1
// selects the sub form of the shared add/sub encoder.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10903 
10904 // Long Subtraction
// 64-bit long subtract, register - register: sub dst, src1, src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10920 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit long subtract of an add/sub-encodable immediate; opcode 0x1
// selects the sub form of the shared add/sub encoder.
// Fix: the format string was "sub$dst" (missing space), garbling the
// debug/disassembly-style output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10935 
10936 // Integer Negation (special case for sub)
10937 
// Integer negation, matched from (0 - src): negw dst, src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10951 
10952 // Long Negation
10953 
// Long negation, matched from (0 - src): neg dst, src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10967 
10968 // Integer Multiply
10969 
// 32-bit integer multiply: mulw dst, src1, src2.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10984 
// Signed 32x32->64 multiply: matches a long multiply of two sign-extended
// ints and emits a single smull instead of two extends plus mul.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10999 
11000 // Long Multiply
11001 
// 64-bit long multiply: mul dst, src1, src2.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11016 
// High 64 bits of a signed 64x64 multiply (MulHiL): smulh.
// Fix: the format string had a stray ", " before the tab, producing
// "smulh   dst, src1, src2, <tab># mulhi" in debug output.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11032 
11033 // Combined Integer Multiply & Add/Sub
11034 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2 via maddw.
// Fix: the format string said "madd" although the encoding emits the
// 32-bit maddw form.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11050 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2 via msubw.
// Fix: the format string said "msub" although the encoding emits the
// 32-bit msubw form.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11066 
11067 // Combined Long Multiply & Add/Sub
11068 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2 via madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11084 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 via msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11100 
11101 // Integer Divide
11102 
// 32-bit signed integer divide via the shared sdivw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11112 
// (src >> 31) >>> 31 extracts the int sign bit; a single lsrw #31 gives
// the same result, so the arithmetic shift is folded away.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
11122 
// src + ((src >> 31) >>> 31), i.e. add the sign bit to src, folded into
// one addw with an LSR #31 shifted operand (power-of-two division
// rounding pattern).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
11136 
11137 // Long Divide
11138 
// 64-bit signed long divide via the shared sdiv encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11148 
// (src >> 63) >>> 63 extracts the long sign bit; a single lsr #63 gives
// the same result, so the arithmetic shift is folded away.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11158 
// src + ((src >> 63) >>> 63), i.e. add the sign bit to src, folded into
// one add with an LSR #63 shifted operand (long counterpart of
// div2Round).
// Fix: the format string omitted "LSR", unlike div2Round's
// "addw $dst, $src, LSR $div1"; debug output did not show the shift.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11172 
11173 // Integer Remainder
11174 
// 32-bit signed remainder: sdivw then msubw, via the shared modw encoder.
// Fix: the two-line format string had a stray "(" ("msubw($dst, …") with
// no closing paren, garbling debug output.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11185 
11186 // Long Remainder
11187 
// 64-bit signed remainder: sdiv then msub, via the shared mod encoder.
// Fixes: stray "(" in the second format line ("msub($dst, …") and "\n"
// instead of "\n\t" (inconsistent with modI), both garbling debug output.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11198 
11199 // Integer Shifts
11200 
11201 // Shift Left Register
// 32-bit shift left by a register amount: lslvw dst, src1, src2.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11216 
11217 // Shift Left Immediate
// 32-bit shift left by an immediate; the count is masked to 0..31
// (& 0x1f), matching Java int shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11232 
11233 // Shift Right Logical Register
// 32-bit logical (unsigned) shift right by a register amount: lsrvw.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11248 
11249 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11264 
11265 // Shift Right Arithmetic Register
// 32-bit arithmetic (signed) shift right by a register amount: asrvw.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11280 
11281 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11296 
11297 // Combined Int Mask and Right Shift (using UBFM)
11298 // TODO
11299 
11300 // Long Shifts
11301 
11302 // Shift Left Register
// 64-bit shift left by a register amount: lslv dst, src1, src2.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11317 
11318 // Shift Left Immediate
// 64-bit shift left by an immediate; the count is masked to 0..63
// (& 0x3f), matching Java long shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11333 
11334 // Shift Right Logical Register
// 64-bit logical (unsigned) shift right by a register amount: lsrv.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11349 
11350 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11365 
11366 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as a long (CastP2X) —
// the card-table store pattern noted above; the cast is a no-op at the
// register level so a plain lsr suffices.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11381 
11382 // Shift Right Arithmetic Register
// 64-bit arithmetic (signed) shift right by a register amount: asrv.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11397 
11398 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11413 
11414 // BEGIN This section of the file is automatically generated. Do not edit --------------
11415 
// dst = ~src1, matched from (src1 ^ -1). eon dst, src1, zr computes
// src1 ^ ~0 = ~src1. (In the auto-generated section: comments here will
// be lost on regeneration.)
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit dst = ~src1, matched from (src1 ^ -1); eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11448 
// 32-bit dst = src1 & ~src2, matched from src1 & (src2 ^ -1): bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11465 
// 64-bit dst = src1 & ~src2, matched from src1 & (src2 ^ -1): bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11482 
// 32-bit dst = src1 | ~src2, matched from src1 | (src2 ^ -1): ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11499 
// 64-bit dst = src1 | ~src2, matched from src1 | (src2 ^ -1): orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11516 
// 32-bit dst = ~(src1 ^ src2), matched from -1 ^ (src2 ^ src1): eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11533 
// 64-bit dst = ~(src1 ^ src2), matched from -1 ^ (src2 ^ src1): eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11550 
// 32-bit dst = src1 & ~(src2 >>> src3): bicw with an LSR shifted
// operand; the shift count is masked to 0..31.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11568 
// 64-bit dst = src1 & ~(src2 >>> src3): bic with an LSR shifted
// operand; the shift count is masked to 0..63.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11586 
11587 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11588                          iRegIorL2I src1, iRegIorL2I src2,
11589                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11590   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11591   ins_cost(1.9 * INSN_COST);
11592   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11593 
11594   ins_encode %{
11595     __ bicw(as_Register($dst$$reg),
11596               as_Register($src1$$reg),
11597               as_Register($src2$$reg),
11598               Assembler::ASR,
11599               $src3$$constant & 0x1f);
11600   %}
11601 
11602   ins_pipe(ialu_reg_reg_shift);
11603 %}
11604 
11605 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11606                          iRegL src1, iRegL src2,
11607                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11608   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11609   ins_cost(1.9 * INSN_COST);
11610   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11611 
11612   ins_encode %{
11613     __ bic(as_Register($dst$$reg),
11614               as_Register($src1$$reg),
11615               as_Register($src2$$reg),
11616               Assembler::ASR,
11617               $src3$$constant & 0x3f);
11618   %}
11619 
11620   ins_pipe(ialu_reg_reg_shift);
11621 %}
11622 
11623 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11624                          iRegIorL2I src1, iRegIorL2I src2,
11625                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11626   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11627   ins_cost(1.9 * INSN_COST);
11628   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11629 
11630   ins_encode %{
11631     __ bicw(as_Register($dst$$reg),
11632               as_Register($src1$$reg),
11633               as_Register($src2$$reg),
11634               Assembler::LSL,
11635               $src3$$constant & 0x1f);
11636   %}
11637 
11638   ins_pipe(ialu_reg_reg_shift);
11639 %}
11640 
11641 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11642                          iRegL src1, iRegL src2,
11643                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11644   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11645   ins_cost(1.9 * INSN_COST);
11646   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11647 
11648   ins_encode %{
11649     __ bic(as_Register($dst$$reg),
11650               as_Register($src1$$reg),
11651               as_Register($src2$$reg),
11652               Assembler::LSL,
11653               $src3$$constant & 0x3f);
11654   %}
11655 
11656   ins_pipe(ialu_reg_reg_shift);
11657 %}
11658 
// Xor-with-inverted-shifted-register family:
//   dst = src1 ^ ~(src2 SHIFT src3)
// matched from (XorX -1 (XorX (ShiftX src2 src3) src1)) — note the
// outer XOR with -1 (src4) supplies the NOT — and emitted as a single
// EON with the shift folded into the second operand.
// Shift counts are masked to the type width (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 ^ ~(src2 >>> src3)
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 ^ ~(src2 >>> src3)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 ^ ~(src2 >> src3)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 ^ ~(src2 >> src3)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 ^ ~(src2 << src3)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 ^ ~(src2 << src3)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11766 
// Or-with-inverted-shifted-register family:
//   dst = src1 | ~(src2 SHIFT src3)
// matched from (OrX src1 (XorX (ShiftX src2 src3) -1)) — the XOR with
// -1 (src4) is the canonical NOT — and emitted as a single ORN
// (OR-NOT) with the shift folded into the second operand.
// Shift counts are masked to the type width (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 | ~(src2 >>> src3)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 | ~(src2 >>> src3)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 | ~(src2 >> src3)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 | ~(src2 >> src3)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 | ~(src2 << src3)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 | ~(src2 << src3)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11874 
// And-with-shifted-register family:
//   dst = src1 & (src2 SHIFT src3)
// folds a constant-count shift into the second operand of a single
// AND (assembler name `andr`/`andw` — `and` is a C++ keyword).
// Shift counts are masked to the type width (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 & (src2 >>> src3)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 & (src2 >>> src3)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 & (src2 >> src3)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 & (src2 >> src3)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 & (src2 << src3)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 & (src2 << src3)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11988 
// Xor-with-shifted-register family:
//   dst = src1 ^ (src2 SHIFT src3)
// folds a constant-count shift into the second operand of a single
// EOR instruction.  Shift counts are masked to the type width
// (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 ^ (src2 >>> src3)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 ^ (src2 >>> src3)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 ^ (src2 >> src3)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 ^ (src2 >> src3)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 ^ (src2 << src3)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 ^ (src2 << src3)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12102 
// Or-with-shifted-register family:
//   dst = src1 | (src2 SHIFT src3)
// folds a constant-count shift into the second operand of a single
// ORR instruction.  Shift counts are masked to the type width
// (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 | (src2 >>> src3)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 | (src2 >>> src3)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 | (src2 >> src3)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 | (src2 >> src3)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 | (src2 << src3)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 | (src2 << src3)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12216 
// Add-with-shifted-register family:
//   dst = src1 + (src2 SHIFT src3)
// folds a constant-count shift into the second operand of a single
// ADD (shifted-register form).  Shift counts are masked to the type
// width (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 + (src2 >>> src3)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 + (src2 >> src3)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 + (src2 >> src3)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12330 
// Sub-with-shifted-register family:
//   dst = src1 - (src2 SHIFT src3)
// folds a constant-count shift into the second operand of a single
// SUB (shifted-register form).  Shift counts are masked to the type
// width (0x1f / 0x3f).
// NOTE(review): operand `cr` is unreferenced in all six instructs
// below — apparently dead; confirm before cleanup.

// 32-bit, logical shift right: dst = src1 - (src2 >>> src3)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right: dst = src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right: dst = src1 - (src2 >> src3)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right: dst = src1 - (src2 >> src3)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, shift left: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, shift left: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12444 
12445 
12446 
12447 // Shift Left followed by Shift Right.
12448 // This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit (x << lshift) >> rshift (arithmetic) folded into one SBFM.
// Per the A64 SBFM definition, immr = (rshift - lshift) mod 64 and
// imms = 63 - lshift select the sign-extended bitfield equivalent to
// the shift pair.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  // in(2) is rshift_count, in(1)->in(2) is lshift_count.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;                 // imms: top bit of the source field
    int r = (rshift - lshift) & 63;      // immr: right-rotate amount
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12469 
12470 // Shift Left followed by Shift Right.
12471 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit (x << lshift) >> rshift (arithmetic) folded into one SBFMW —
// the signed bitfield move used e.g. for i2b/i2s sign extension.
// immr = (rshift - lshift) mod 32, imms = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  // in(2) is rshift_count, in(1)->in(2) is lshift_count.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;                 // imms: top bit of the source field
    int r = (rshift - lshift) & 31;      // immr: right-rotate amount
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12492 
12493 // Shift Left followed by Shift Right.
12494 // This idiom is used by the compiler for the i2b bytecode etc.
12495 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
12496 %{
12497   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
12498   // Make sure we are not going to exceed what ubfm can do.
12499   predicate((unsigned int)n->in(2)->get_int() <= 63
12500             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
12501 
12502   ins_cost(INSN_COST * 2);
12503   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
12504   ins_encode %{
12505     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
12506     int s = 63 - lshift;
12507     int r = (rshift - lshift) & 63;
12508     __ ubfm(as_Register($dst$$reg),
12509             as_Register($src$$reg),
12510             r, s);
12511   %}
12512 
12513   ins_pipe(ialu_reg_shift);
12514 %}
12515 
12516 // Shift Left followed by Shift Right.
12517 // This idiom is used by the compiler for the i2b bytecode etc.
12518 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
12519 %{
12520   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
12521   // Make sure we are not going to exceed what ubfmw can do.
12522   predicate((unsigned int)n->in(2)->get_int() <= 31
12523             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
12524 
12525   ins_cost(INSN_COST * 2);
12526   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
12527   ins_encode %{
12528     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
12529     int s = 31 - lshift;
12530     int r = (rshift - lshift) & 31;
12531     __ ubfmw(as_Register($dst$$reg),
12532             as_Register($src$$reg),
12533             r, s);
12534   %}
12535 
12536   ins_pipe(ialu_reg_shift);
12537 %}
12538 // Bitfield extract with shift & mask
12539 
// Unsigned bitfield extract, 32-bit: matches (src >>> rshift) & mask
// where mask is 2^width - 1 (immI_bitmask guarantees a contiguous
// low-order mask), emitting a single ubfxw.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // Fixed: include $rshift in the format; it was omitted, so the
  // -XX:+PrintOptoAssembly listing did not show the field position.
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width is the number of mask bits.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// Unsigned bitfield extract, 64-bit: matches (src >>> rshift) & mask
// where mask is 2^width - 1 (immL_bitmask), emitting a single ubfx.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  // Fixed: include $rshift in the format; it was omitted, so the
  // -XX:+PrintOptoAssembly listing did not show the field position.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width is the number of mask bits.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12570 
12571 // We can use ubfx when extending an And with a mask when we know mask
12572 // is positive.  We know that because immI_bitmask guarantees it.
// Shift-and-mask of an int widened to long.  The 64-bit ubfx zeroes the
// upper bits, so the ConvI2L comes for free: immI_bitmask guarantees the
// masked value is non-negative, hence zero-extension equals sign-extension.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  // Fixed: include $rshift in the format; it was omitted, so the
  // -XX:+PrintOptoAssembly listing did not show the field position.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width is the number of mask bits.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12588 
// Rotations
//
// (x << l) | (x' >>> r) with l + r == register width is matched to a
// single extr; when src1 == src2 this is a rotate right by r.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must sum to 64 (mod 64) for extr to be equivalent.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: note the encoding emits the w-form, extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must sum to 32 (mod 32).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same as extrOrL but matching Add: with disjoint bit ranges
// (guaranteed by the shift-count predicate) Add and Or are equivalent.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit Add variant; emits extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12650 
12651 
// rol expander
//
// AArch64 has no rotate-left instruction: rotate left by N is done as a
// variable rotate right (rorv) by the negated count, computed into
// rscratch1 via subw from zr.  These expanders carry only an effect()
// and are instantiated by the matcher rules further down.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; then rotate right by it.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// 32-bit variant of rolL_rReg, emitting rorvw.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matcher rule: (x << s) | (x >>> (64 - s)) is a rotate left by s.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 - s; shift counts are taken mod 64 by the hardware,
// so the canonicalized (0 - s) form is also a rotate left.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate left: (x << s) | (x >>> (32 - s)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate left, (0 - s) canonical form.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// ror expander
//
// Rotate right maps directly onto rorv; no scratch register needed.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant, emitting rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matcher rule: (x >>> s) | (x << (64 - s)) is a rotate right by s.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Rotate right, (0 - s) canonical form.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate right: (x >>> s) | (x << (32 - s)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate right, (0 - s) canonical form.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12785 
// Add/subtract (extended)
//
// These rules fold an explicit sign/zero extension of the second operand
// into the extended-register form of add/sub (ext::sxtb/sxth/sxtw/uxtb).
// The (src << k) >> k shift pairs are the ideal-graph encoding of a
// narrowing sign- or zero-extension.

// long += sign-extended int
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// long -= sign-extended int
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};


// int += sign-extended short: (src2 << 16) >> 16 is a 16-bit sign extend.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int += sign-extended byte: (src2 << 24) >> 24 is an 8-bit sign extend.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int += zero-extended byte: (src2 << 24) >>> 24 is an 8-bit zero extend.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += sign-extended short ((src2 << 48) >> 48).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += sign-extended int ((src2 << 32) >> 32).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += sign-extended byte ((src2 << 56) >> 56).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += zero-extended byte ((src2 << 56) >>> 56).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12905 
12906 
// Zero extensions written as AND with a low-order mask (0xff, 0xffff,
// 0xffffffff) are likewise folded into the extended-register add/sub.

// int += (src2 & 0xff), i.e. zero-extended byte.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int += (src2 & 0xffff), i.e. zero-extended halfword.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += (src2 & 0xff).
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += (src2 & 0xffff).
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long += (src2 & 0xffffffff), i.e. zero-extended word.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int -= (src2 & 0xff).
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int -= (src2 & 0xffff).
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long -= (src2 & 0xff).
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long -= (src2 & 0xffff).
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long -= (src2 & 0xffffffff).
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13036 
13037 // END This section of the file is automatically generated. Do not edit --------------
13038 
13039 // ============================================================================
13040 // Floating Point Arithmetic Instructions
13041 
13042 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
13043   match(Set dst (AddF src1 src2));
13044 
13045   ins_cost(INSN_COST * 5);
13046   format %{ "fadds   $dst, $src1, $src2" %}
13047 
13048   ins_encode %{
13049     __ fadds(as_FloatRegister($dst$$reg),
13050              as_FloatRegister($src1$$reg),
13051              as_FloatRegister($src2$$reg));
13052   %}
13053 
13054   ins_pipe(fp_dop_reg_reg_s);
13055 %}
13056 
13057 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
13058   match(Set dst (AddD src1 src2));
13059 
13060   ins_cost(INSN_COST * 5);
13061   format %{ "faddd   $dst, $src1, $src2" %}
13062 
13063   ins_encode %{
13064     __ faddd(as_FloatRegister($dst$$reg),
13065              as_FloatRegister($src1$$reg),
13066              as_FloatRegister($src2$$reg));
13067   %}
13068 
13069   ins_pipe(fp_dop_reg_reg_d);
13070 %}
13071 
13072 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
13073   match(Set dst (SubF src1 src2));
13074 
13075   ins_cost(INSN_COST * 5);
13076   format %{ "fsubs   $dst, $src1, $src2" %}
13077 
13078   ins_encode %{
13079     __ fsubs(as_FloatRegister($dst$$reg),
13080              as_FloatRegister($src1$$reg),
13081              as_FloatRegister($src2$$reg));
13082   %}
13083 
13084   ins_pipe(fp_dop_reg_reg_s);
13085 %}
13086 
13087 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
13088   match(Set dst (SubD src1 src2));
13089 
13090   ins_cost(INSN_COST * 5);
13091   format %{ "fsubd   $dst, $src1, $src2" %}
13092 
13093   ins_encode %{
13094     __ fsubd(as_FloatRegister($dst$$reg),
13095              as_FloatRegister($src1$$reg),
13096              as_FloatRegister($src2$$reg));
13097   %}
13098 
13099   ins_pipe(fp_dop_reg_reg_d);
13100 %}
13101 
13102 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
13103   match(Set dst (MulF src1 src2));
13104 
13105   ins_cost(INSN_COST * 6);
13106   format %{ "fmuls   $dst, $src1, $src2" %}
13107 
13108   ins_encode %{
13109     __ fmuls(as_FloatRegister($dst$$reg),
13110              as_FloatRegister($src1$$reg),
13111              as_FloatRegister($src2$$reg));
13112   %}
13113 
13114   ins_pipe(fp_dop_reg_reg_s);
13115 %}
13116 
13117 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
13118   match(Set dst (MulD src1 src2));
13119 
13120   ins_cost(INSN_COST * 6);
13121   format %{ "fmuld   $dst, $src1, $src2" %}
13122 
13123   ins_encode %{
13124     __ fmuld(as_FloatRegister($dst$$reg),
13125              as_FloatRegister($src1$$reg),
13126              as_FloatRegister($src2$$reg));
13127   %}
13128 
13129   ins_pipe(fp_dop_reg_reg_d);
13130 %}
13131 
// Fused multiply-add family.  All rules are guarded by UseFMA since a
// fused operation skips the intermediate rounding a separate mul+add
// would perform.  The Neg variants select the fnmadd/fmsub/fnmsub forms.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand is referenced by neither match nor
// encode -- presumably legacy; confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): 'zero' operand unused here as well; see mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13272 
13273 
// float / float -> fdivs
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  // Divide is much slower than the other FP ops, hence the high cost.
  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// double / double -> fdivd
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13303 
// float negate -> fnegs
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// double negate -> fnegd
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// float absolute value -> fabss
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// double absolute value -> fabsd
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13357 
// double sqrt -> fsqrtd
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_s (the single-precision divide pipeline class);
  // a double-precision op belongs on fp_div_d, consistent with divD_reg_reg.
  ins_pipe(fp_div_d);
%}
13370 
// float sqrt: Java expresses Math.sqrt on a float as
// (float)Math.sqrt((double)f); the fsqrts result is identical, so the
// whole convert-sqrt-convert chain is matched to a single instruction.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_d (the double-precision divide pipeline class);
  // a single-precision op belongs on fp_div_s, consistent with divF_reg_reg.
  ins_pipe(fp_div_s);
%}
13383 
13384 // ============================================================================
13385 // Logical Instructions
13386 
13387 // Integer Logical Instructions
13388 
13389 // And Instructions
13390 
13391 
13392 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
13393   match(Set dst (AndI src1 src2));
13394 
13395   format %{ "andw  $dst, $src1, $src2\t# int" %}
13396 
13397   ins_cost(INSN_COST);
13398   ins_encode %{
13399     __ andw(as_Register($dst$$reg),
13400             as_Register($src1$$reg),
13401             as_Register($src2$$reg));
13402   %}
13403 
13404   ins_pipe(ialu_reg_reg);
13405 %}
13406 
// int & logical-immediate (immILog: constants encodable as an AArch64
// bitmask immediate) -> andw with immediate.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format said "andsw" but the encoding emits the
  // non-flag-setting andw (consistent with orI_reg_imm / xorI_reg_imm).
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13421 
// Or Instructions

// int | int -> orrw
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int | logical-immediate -> orrw with immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int ^ int -> eorw
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int ^ logical-immediate -> eorw with immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13485 
// Long (64-bit) Logical Instructions
13488 
// andL_reg_reg -- bitwise AND of two 64-bit long registers.
// n.b. the rFlagsReg cr operand carries no effect(); NOTE(review):
// presumably retained for matcher symmetry -- confirm before removing.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed debug formats throughout this group: these match the 64-bit
  // long ideal nodes (AndL/OrL/XorL), so the comment is "# long" not
  // "# int".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// andL_reg_imm -- bitwise AND of a 64-bit long register with a logical
// (bitmask-encodable) immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// orL_reg_reg -- bitwise OR of two 64-bit long registers.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// orL_reg_imm -- bitwise OR of a 64-bit long register with a logical
// (bitmask-encodable) immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// xorL_reg_reg -- bitwise XOR of two 64-bit long registers.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// xorL_reg_imm -- bitwise XOR of a 64-bit long register with a logical
// (bitmask-encodable) immediate. format/ins_cost reordered to match the
// other logical rules (behavior unchanged).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13582 
// convI2L_reg_reg -- sign-extend int to long via sbfm (sxtw alias).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    // sbfm with immr=0, imms=31 == sxtw: sign-extend bits 0..31 to 64 bits
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// convUI2L_reg_reg -- unsigned int to long: matches (ConvI2L src) & 0xFFFFFFFF
// and emits a single zero-extend instead of sign-extend + mask.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    // ubfm with immr=0, imms=31 == uxtw: zero-extend bits 0..31 to 64 bits
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// convL2I_reg -- truncate long to int; the 32-bit movw copy zeroes the
// upper word of dst.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13621 
// convI2B -- int to boolean: dst = (src != 0) ? 1 : 0.
// Clobbers flags (KILL cr) via the compare.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// convP2B -- pointer to boolean: dst = (src != NULL) ? 1 : 0.
// Same shape as convI2B but uses the full-width (64-bit) compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13657 
// Floating-point / integer conversions. Each rule is a single
// conversion instruction; f/d suffixes denote single/double precision,
// w-suffixed register forms denote 32-bit integer operands.

// convD2F_reg -- double to float (narrowing).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// convF2D_reg -- float to double (widening).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// convF2I_reg_reg -- float to int, round toward zero (fcvtzs, 32-bit).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// convF2L_reg_reg -- float to long, round toward zero (fcvtzs, 64-bit).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// convI2F_reg_reg -- signed int to float (scvtf, 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// convL2F_reg_reg -- signed long to float (scvtf, 64-bit source).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// convD2I_reg_reg -- double to int, round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// convD2L_reg_reg -- double to long, round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// convI2D_reg_reg -- signed int to double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// convL2D_reg_reg -- signed long to double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13787 
13788 // stack <-> reg and reg <-> reg shuffles with no conversion
13789 
// Raw-bits moves between a stack slot and a register (no value
// conversion -- the bit pattern is reinterpreted).

// MoveF2I_stack_reg -- load float bits from stack into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveI2F_stack_reg -- load int bits from stack into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveD2L_stack_reg -- load double bits from stack into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveL2D_stack_reg -- load long bits from stack into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13861 
// MoveF2I_reg_stack -- store float bits from a float register to an
// int stack slot (raw bits, no conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveI2F_reg_stack -- store int bits from an int register to a float
// stack slot (raw bits, no conversion).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13897 
// MoveD2L_reg_stack -- store double bits from a double register to a
// long stack slot (raw bits, no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed debug format: operands were reversed ("strd $dst, $src");
  // the encoding stores $src to the $dst stack slot, matching the
  // sibling MoveF2I_reg_stack / MoveL2D_reg_stack formats.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13915 
// MoveL2D_reg_stack -- store long bits from a long register to a
// double stack slot (raw bits, no conversion).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13933 
// Raw-bits moves between integer and FP registers via fmov
// (no conversion, just a register-file transfer).

// MoveF2I_reg_reg -- float register bits to int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// MoveI2F_reg_reg -- int register bits to float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// MoveD2L_reg_reg -- double register bits to long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// MoveL2D_reg_reg -- long register bits to double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14005 
14006 // ============================================================================
14007 // clearing of an array
14008 
// clearArray_reg_reg -- zero cnt words starting at base; both inputs
// are pinned to fixed registers and clobbered (USE_KILL) by zero_words.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// clearArray_imm_reg -- constant-length variant; only used when the
// word count is below the BlockZeroingLowLimit threshold (see predicate),
// where an inline sequence beats the general zeroing path.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14040 
14041 // ============================================================================
14042 // Overflow Math Instructions
14043 
// Overflow checks for add/sub/neg: emit a flag-setting compare-style
// instruction (cmn for add, cmp for sub) so the V flag reflects
// signed overflow of the corresponding arithmetic; consumers test V.

// overflowAddI_reg_reg -- int add overflow check (cmn == adds to zr).
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// overflowAddI_reg_imm -- int add overflow check, immediate operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// overflowAddL_reg_reg -- long add overflow check.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// overflowAddL_reg_imm -- long add overflow check, immediate operand.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// overflowSubI_reg_reg -- int subtract overflow check (cmp == subs to zr).
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// overflowSubI_reg_imm -- int subtract overflow check, immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// overflowSubL_reg_reg -- long subtract overflow check.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// overflowSubL_reg_imm -- long subtract overflow check, immediate operand.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// overflowNegI_reg -- int negation overflow (0 - op1) check.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// overflowNegL_reg -- long negation overflow (0 - op1) check.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14173 
// Multiply-overflow checks. There is no single flag-setting multiply on
// AArch64, so these compare the full-width product against its
// sign-extended/truncated form; the set-flags variants then synthesize
// a V-flag result so downstream cmpOp consumers can test overflow.
// n.b. these sequences use rscratch1/rscratch2 and are order-sensitive.

// overflowMulI_reg -- int multiply overflow, producing flags.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    // 64-bit product of the two 32-bit inputs
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// overflowMulI_reg_branch -- int multiply overflow fused with a branch;
// only matched for overflow/no_overflow tests (see predicate), so the
// V-flag synthesis above can be skipped and a NE/EQ branch used instead.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // map VS (overflow) -> NE, VC (no overflow) -> EQ for the branch
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// overflowMulL_reg -- long multiply overflow, producing flags.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// overflowMulL_reg_branch -- long multiply overflow fused with a branch;
// same NE/EQ mapping trick as the int variant.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14263 
14264 // ============================================================================
14265 // Compare Instructions
14266 
// Signed int compares; cheaper immediate forms are preferred via
// ins_cost (add/sub-encodable immediates cost less than arbitrary ones).

// compI_reg_reg -- compare two int registers.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// compI_reg_immI0 -- compare int register against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// compI_reg_immIAddSub -- compare against an add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// compI_reg_immI -- compare against an arbitrary int immediate
// (costlier: may need the constant materialized first).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14322 
14323 // Unsigned compare Instructions; really, same as signed compare
14324 // except it should only be used to feed an If or a CMovI which takes a
14325 // cmpOpU.
14326 
// compU_reg_reg -- unsigned int compare; identical encoding to the
// signed compare but defines rFlagsRegU so only cmpOpU consumers match.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// compU_reg_immI0 -- unsigned compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// compU_reg_immIAddSub -- unsigned compare against an add/sub-encodable
// immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// compU_reg_immI -- unsigned compare against an arbitrary immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14382 
// compL_reg_reg -- signed long compare of two registers.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// compL_reg_immI0 -- long compare against zero.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// compL_reg_immLAddSub -- long compare against an add/sub-encodable
// immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// compL_reg_immL -- long compare against an arbitrary immediate
// (costlier: may need the constant materialized first).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14438 
// Pointer and compressed-pointer compares; pointer comparison is
// unsigned, hence rFlagsRegU.

// compP_reg_reg -- compare two pointers.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// compN_reg_reg -- compare two compressed (narrow) pointers.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// testP_reg -- null test of a pointer.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// testN_reg -- null test of a compressed pointer.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14494 
14495 // FP comparisons
14496 //
14497 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14498 // using normal cmpOp. See declaration of rFlagsReg for details.
14499 
// compF_reg_reg -- single-precision FP compare of two registers;
// sets the normal flags register (see section comment above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14513 
// compF_reg_zero -- single-precision FP compare against +0.0 using the
// fcmp-with-zero form (no register needed for the constant).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // was 0.0D: the 'D' suffix is a Java-ism, not valid standard C++
    // in the generated code (rejected by clang); plain 0.0 is already
    // a double literal.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double (64-bit) floating point comparisons
14528 
// compD_reg_reg -- double-precision FP compare of two registers;
// sets the normal flags register (see section comment above).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14542 
// compD_reg_zero -- double-precision FP compare against +0.0 using the
// fcmp-with-zero form (no register needed for the constant).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // was 0.0D: the 'D' suffix is a Java-ism, not valid standard C++
    // in the generated code (rejected by clang); plain 0.0 is already
    // a double literal.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14556 
// compF3_reg_reg -- three-way float compare: dst = -1/0/+1 for
// less(or unordered)/equal/greater, built from fcmp + two conditional
// selects. Clobbers flags (KILL cr).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is bound but never branched to -- appears to
    // be vestigial; confirm before removing.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
14584 
// Three-way double compare: $dst := -1 / 0 / +1 for src1 <, ==, > src2.
// Per the csinvw/csnegw comments below, an unordered compare (NaN input)
// satisfies LT and therefore also yields -1.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the disassembly format string.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead "Label done" (declared/bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14611 
// Three-way float compare against constant 0.0: $dst := -1 / 0 / +1 for
// src1 <, ==, > 0.0; unordered (NaN) satisfies LT and also yields -1.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the disassembly format string.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead "Label done" (declared/bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14638 
// Three-way double compare against constant 0.0: $dst := -1 / 0 / +1 for
// src1 <, ==, > 0.0; unordered (NaN) satisfies LT and also yields -1.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the disassembly format string.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead "Label done" (declared/bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14664 
// CmpLTMask: $dst := (p < q) ? -1 : 0 (all-ones mask when p < q).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // csetw gives 1 when LT, 0 otherwise ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate to turn 1 into the -1 (all-ones) mask.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: a single arithmetic shift right by 31
// replicates the sign bit, giving -1 when src < 0 and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14701 
14702 // ============================================================================
14703 // Max and Min
14704 
// Signed int minimum: compare then conditionally select src1 when LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 < src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: compare then conditionally select src1 when GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 > src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14755 
14756 // ============================================================================
14757 // Branch Instructions
14758 
14759 // Direct Branch.
14760 instruct branch(label lbl)
14761 %{
14762   match(Goto);
14763 
14764   effect(USE lbl);
14765 
14766   ins_cost(BRANCH_COST);
14767   format %{ "b  $lbl" %}
14768 
14769   ins_encode(aarch64_enc_b(lbl));
14770 
14771   ins_pipe(pipe_branch);
14772 %}
14773 
14774 // Conditional Near Branch
14775 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
14776 %{
14777   // Same match rule as `branchConFar'.
14778   match(If cmp cr);
14779 
14780   effect(USE lbl);
14781 
14782   ins_cost(BRANCH_COST);
14783   // If set to 1 this indicates that the current instruction is a
14784   // short variant of a long branch. This avoids using this
14785   // instruction in first-pass matching. It will then only be used in
14786   // the `Shorten_branches' pass.
14787   // ins_short_branch(1);
14788   format %{ "b$cmp  $lbl" %}
14789 
14790   ins_encode(aarch64_enc_br_con(cmp, lbl));
14791 
14792   ins_pipe(pipe_branch_cond);
14793 %}
14794 
14795 // Conditional Near Branch Unsigned
14796 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
14797 %{
14798   // Same match rule as `branchConFar'.
14799   match(If cmp cr);
14800 
14801   effect(USE lbl);
14802 
14803   ins_cost(BRANCH_COST);
14804   // If set to 1 this indicates that the current instruction is a
14805   // short variant of a long branch. This avoids using this
14806   // instruction in first-pass matching. It will then only be used in
14807   // the `Shorten_branches' pass.
14808   // ins_short_branch(1);
14809   format %{ "b$cmp  $lbl\t# unsigned" %}
14810 
14811   ins_encode(aarch64_enc_br_conU(cmp, lbl));
14812 
14813   ins_pipe(pipe_branch_cond);
14814 %}
14815 
14816 // Make use of CBZ and CBNZ.  These instructions, as well as being
14817 // shorter than (cmp; branch), have the additional benefit of not
14818 // killing the flags.
14819 
// Compare-int-to-zero and branch: folds (cmp; b.cond) into cbzw/cbnzw.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-long-to-zero and branch: 64-bit cbz/cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-pointer-to-null and branch: 64-bit cbz/cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-narrow-oop-to-zero and branch: narrow oops are 32 bits, so
// the w-form cbzw/cbnzw is used.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a decoded narrow oop: test the 32-bit compressed form
// directly instead of decoding first (decode of 0 is 0).
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned compare-int-to-zero and branch.  For an unsigned compare with
// zero, EQ and LS ("lower or same") both reduce to "is zero", hence the
// two-condition test below.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned compare-long-to-zero and branch: 64-bit variant of the above.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14938 
14939 // Test bit and Branch
14940 
14941 // Patterns for short (< 32KiB) variants
// Sign test of a long and branch: "x < 0" / "x >= 0" becomes a test of
// the sign bit (bit 63) via tbnz/tbz (LT maps to NE, GE to EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int and branch: same idea, sign bit is bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long and branch: (x & (1<<k)) ==/!= 0 becomes
// tbz/tbnz on bit k; predicate requires the mask be a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int and branch: 32-bit variant of the above.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15007 
15008 // And far variants
15009 instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
15010   match(If cmp (CmpL op1 op2));
15011   effect(USE labl);
15012 
15013   ins_cost(BRANCH_COST);
15014   format %{ "cb$cmp   $op1, $labl # long" %}
15015   ins_encode %{
15016     Label* L = $labl$$label;
15017     Assembler::Condition cond =
15018       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
15019     __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
15020   %}
15021   ins_pipe(pipe_cmp_branch);
15022 %}
15023 
15024 instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
15025   match(If cmp (CmpI op1 op2));
15026   effect(USE labl);
15027 
15028   ins_cost(BRANCH_COST);
15029   format %{ "cb$cmp   $op1, $labl # int" %}
15030   ins_encode %{
15031     Label* L = $labl$$label;
15032     Assembler::Condition cond =
15033       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
15034     __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
15035   %}
15036   ins_pipe(pipe_cmp_branch);
15037 %}
15038 
15039 instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
15040   match(If cmp (CmpL (AndL op1 op2) op3));
15041   predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
15042   effect(USE labl);
15043 
15044   ins_cost(BRANCH_COST);
15045   format %{ "tb$cmp   $op1, $op2, $labl" %}
15046   ins_encode %{
15047     Label* L = $labl$$label;
15048     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15049     int bit = exact_log2($op2$$constant);
15050     __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
15051   %}
15052   ins_pipe(pipe_cmp_branch);
15053 %}
15054 
15055 instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
15056   match(If cmp (CmpI (AndI op1 op2) op3));
15057   predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
15058   effect(USE labl);
15059 
15060   ins_cost(BRANCH_COST);
15061   format %{ "tb$cmp   $op1, $op2, $labl" %}
15062   ins_encode %{
15063     Label* L = $labl$$label;
15064     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15065     int bit = exact_log2($op2$$constant);
15066     __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
15067   %}
15068   ins_pipe(pipe_cmp_branch);
15069 %}
15070 
15071 // Test bits
15072 
// Set flags from (op1 & op2) where op2 is a valid 64-bit logical
// immediate; emitted as tst (ands xzr, op1, #imm).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15085 
// Set flags from (op1 & op2) where op2 is a valid 32-bit logical
// immediate; emitted as tstw (ands wzr, op1, #imm).
// Fixed the format string to read "tstw", matching both the actual
// encoding below and the sibling cmpI_and_reg rule.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15098 
// Set flags from (op1 & op2) with a register mask: tst (ands xzr, ...).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit register-mask variant: tstw (ands wzr, ...).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15120 
15121 
15122 // Conditional Far Branch
15123 // Conditional Far Branch Unsigned
15124 // TODO: fixme
15125 
15126 // counted loop end branch near
// Counted loop back-branch (signed condition codes): conditional branch
// to the loop head, encoded like branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted loop back-branch (unsigned condition codes).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15159 
15160 // counted loop end branch far
15161 // counted loop end branch far unsigned
15162 // TODO: fixme
15163 
15164 // ============================================================================
15165 // inlined locking and unlocking
15166 
// Inlined fast-path monitor enter; result is reported through the flag
// register.  tmp and tmp2 are scratch registers clobbered by the stub.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined fast-path monitor exit; mirror of cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15194 
15195 
15196 // ============================================================================
15197 // Safepoint Instructions
15198 
15199 // TODO
15200 // provide a near and far version of this code
15201 
// Safepoint poll: load from the polling page; the VM protects the page
// to trap threads at a safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15214 
15215 
15216 // ============================================================================
15217 // Procedure Call/Return Instructions
15218 
15219 // Call Java Static Instruction
15220 
// Call Java Static Instruction: direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction: call through an inline cache.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction (leaf: no Java frame walkback required)

instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction (leaf, does not use/kill FP state)

instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15306 
15307 // Tail Call; Jump from runtime stub to Java code.
15308 // Also known as an 'interprocedural jump'.
15309 // Target of jump will eventually return to caller.
15310 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the callee
// eventually returns to this frame's caller.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding; ex_oop (in r0) carries the
// exception oop to the target handler.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15336 
15337 // Create exception oop: created by stack-crawling runtime code.
15338 // Created exception is now available to this handler, and is setup
15339 // just prior to jumping to this handler. No code emitted.
15340 // TODO check
15341 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size node: only tells the register allocator where the oop lives.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15367 
15368 
15369 // Return Instruction
15370 // epilog node loads ret address into lr as part of frame pop
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now: emit a guaranteed-faulting instruction for unreachable code.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15397 
15398 // ============================================================================
15399 // Partial Subtype Check
15400 //
15401 // superklass array for an instance of the superklass.  Set a hidden
15402 // internal cache on a hit (cache is checked with exposed code in
15403 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15404 // encoding ALSO sets flags.
15405 
// Partial subtype check: scan the secondary-supers array of sub for
// super; result is zero on a hit, non-zero on a miss, and flags are set.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when only the comparison of the check result against
// null is needed; produces flags, result register is just a kill.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15435 
// String.compareTo intrinsic, UTF-16 vs UTF-16 encoding.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.compareTo intrinsic, Latin-1 vs Latin-1 encoding.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.compareTo intrinsic, mixed UTF-16 vs Latin-1; needs two FP
// temporaries for the widening comparison loop.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.compareTo intrinsic, mixed Latin-1 vs UTF-16; mirror of UL.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15504 
15505 instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
15506        iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
15507 %{
15508   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
15509   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
15510   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
15511          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
15512   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
15513 
15514   ins_encode %{
15515     __ string_indexof($str1$$Register, $str2$$Register,
15516                       $cnt1$$Register, $cnt2$$Register,
15517                       $tmp1$$Register, $tmp2$$Register,
15518                       $tmp3$$Register, $tmp4$$Register,
15519                       -1, $result$$Register, StrIntrinsicNode::UU);
15520   %}
15521   ins_pipe(pipe_class_memory);
15522 %}
15523 
// String.indexOf intrinsic: both needle and haystack are Latin-1 (LL).
// Same register convention as string_indexofUU; only the encoding differs.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    // icnt2 == -1: needle length is a runtime value in $cnt2.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15542 
// String.indexOf intrinsic, mixed-encoding case UL.
// Same register convention as string_indexofUU; only the encoding differs.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    // icnt2 == -1: needle length is a runtime value in $cnt2.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15561 
// String.indexOf intrinsic, mixed-encoding case LU.
// Same register convention as string_indexofUU; only the encoding differs.
instruct string_indexofLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LU)" %}

  ins_encode %{
    // icnt2 == -1: needle length is a runtime value in $cnt2.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15580 
// String.indexOf with a compile-time-constant needle length (UU).
// The immI_le_4 operand restricts matching to small constant needles;
// the constant is passed directly and zr replaces the cnt2 register, so
// cnt2 is neither an input nor killed here.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15601 
// String.indexOf with a compile-time-constant needle length (LL).
// Mirrors string_indexof_conUU; only the encoding differs.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15622 
// String.indexOf with a constant needle length (UL).
// NOTE(review): uses immI_1 rather than immI_le_4, so only a
// single-element needle matches here — presumably because the mixed
// encoding path only special-cases length 1; confirm against
// MacroAssembler::string_indexof.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15643 
// String.indexOf with a constant needle length (LU).
// Mirrors string_indexof_conUL: immI_1 restricts to single-element needles.
instruct string_indexof_conLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15664 
// indexOf of a single char in a UTF-16 string (StrIndexOfChar node).
// All inputs are clobbered; three integer scratch registers are used.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15682 
// String.equals for Latin-1 strings (LL): byte-wise comparison.
// Element size 1 is passed to arrays_equals; cnt is a byte count.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     1, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
15699 
// String.equals for UTF-16 strings (UU).
// The incoming count is in bytes; halving it (asrw by 1) converts to a
// 16-bit char count before the element-size-2 comparison.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
15717 
// Arrays.equals for byte arrays (LL encoding).
// Both array pointers and $tmp are clobbered; condition flags are killed.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed: "$ary2" was written "ary2", so the debug listing printed the
  // literal text instead of the second array operand.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Element size 1 (bytes); length is read from the array headers,
    // hence no explicit count operand.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15733 
// Arrays.equals for char arrays (UU encoding); element size 2.
// Both array pointers and $tmp are clobbered; condition flags are killed.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed: "$ary2" was written "ary2", so the debug listing printed the
  // literal text instead of the second array operand.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15749 
// Intrinsic for the HasNegatives node: scans a byte[] for any byte with
// the sign bit set. Inputs are clobbered; flags are killed.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15760 
// fast char[] to byte[] compression
// Compresses UTF-16 chars to Latin-1 bytes using SIMD temporaries V0-V3.
// NOTE(review): the format comment claims R4 is killed, but no R4 operand
// appears in the effect list (inputs are R1/R2/R3, result R0) — the
// comment looks stale; verify against char_array_compress before relying
// on it.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15779 
// fast byte[] to char[] inflation
// Inflates Latin-1 bytes to UTF-16 chars; no value result (Universe dummy).
// NOTE(review): the format comment lists only $tmp1 and $tmp2, but the
// effect list also takes tmp3 (vector) and tmp4 (R3) as TEMPs — the
// comment appears incomplete.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15794 
// encode char[] to byte[] in ISO_8859_1
// SIMD temporaries V0-V3 are clobbered (KILL, not TEMP, unlike
// string_compress above); all inputs are clobbered as well.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15813 
15814 // ============================================================================
15815 // This name is KNOWN by the ADLC and cannot be changed.
15816 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15817 // for this guy.
// ThreadLocal: the current-thread pointer already lives in the dedicated
// thread register (thread_RegP), so this emits no code at all
// (size(0), empty encoding, zero cost).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15832 
15833 // ====================VECTOR INSTRUCTIONS=====================================
15834 
15835 // Load vector (32 bits)
// Load a 32-bit vector into the low half of a D-sized SIMD register (ldrs).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15845 
15846 // Load vector (64 bits)
// Load a 64-bit vector into a D-sized SIMD register (ldrd).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15856 
15857 // Load Vector (128 bits)
// Load a 128-bit vector into a Q-sized SIMD register (ldrq).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15867 
15868 // Store Vector (32 bits)
// Store the low 32 bits of a SIMD register to memory (strs).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15878 
15879 // Store Vector (64 bits)
// Store a 64-bit vector to memory (strd).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15889 
15890 // Store Vector (128 bits)
// Store a 128-bit vector to memory (strq).
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15900 
// Broadcast a GP-register byte into all 8 byte lanes of a D register (dup).
// Also handles 4-element vectors (length 4 uses the same 8B form).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15913 
// Broadcast a GP-register byte into all 16 byte lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15925 
// Broadcast an immediate byte into all 8 byte lanes (movi); the constant
// is masked to its low 8 bits.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15938 
// Broadcast an immediate byte into all 16 byte lanes (movi, Q form).
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15950 
// Broadcast a GP-register short into 4 halfword lanes (dup, T4H).
// Also handles 2-element short vectors.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15963 
// Broadcast a GP-register short into 8 halfword lanes (dup, T8H).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15975 
// Broadcast an immediate short into 4 halfword lanes; constant masked
// to 16 bits.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15988 
// Broadcast an immediate short into 8 halfword lanes (Q form).
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16000 
// Broadcast a GP-register int into 2 word lanes (dup, T2S).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16012 
// Broadcast a GP-register int into 4 word lanes (dup, T4S).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16024 
// Broadcast an immediate int into 2 word lanes (movi).
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16036 
// Broadcast an immediate int into 4 word lanes (movi, Q form).
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16048 
// Broadcast a GP-register long into 2 doubleword lanes (dup, T2D).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16060 
// Zero a 128-bit vector by XOR-ing it with itself (no constant load).
// NOTE(review): despite the "2L" name, this matches (ReplicateI zero)
// with an immI0 operand, and the format comment says "(4I)" — presumably
// an int-zero replicate pattern reused for the all-zero long vector;
// confirm intent against the ideal-graph ReplicateL handling.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // eor dst,dst,dst produces all-zero lanes regardless of dst's
    // previous contents.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16074 
// Broadcast a float (from an FP register) into 2 lanes (dup, T2S).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
16087 
// Broadcast a float (from an FP register) into 4 lanes (dup, T4S).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
16100 
// Broadcast a double (from an FP register) into 2 lanes (dup, T2D).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16113 
16114 // ====================REDUCTION ARITHMETIC====================================
16115 
// Horizontal add of a 2-lane int vector plus a scalar accumulator:
// extract both lanes with umov, then two scalar addw's.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16134 
// Horizontal add of a 4-lane int vector plus a scalar accumulator:
// single SIMD addv collapses the lanes, then one extract and one addw.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16152 
// Horizontal multiply of a 2-lane int vector with a scalar accumulator:
// lanes are extracted one at a time and folded with scalar mul.
// dst is a TEMP as well as the result because it is written before the
// last source read.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16171 
// Horizontal multiply of a 4-lane int vector with a scalar accumulator.
// Strategy: move the high 64 bits on top of the low 64 bits (ins D),
// do one 2-lane SIMD multiply (mulv T2S) to pairwise-combine lanes
// {0*2, 1*3}, then extract the two partial products and finish with
// scalar muls.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16196 
// Float add-reduction over 2 lanes: scalar fadds with lane 0 (implicit in
// the S-register view of src2), then shift lane 1 down via ins and add it.
// Lane-by-lane scalar adds keep strict left-to-right FP ordering.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16216 
// Float add-reduction over 4 lanes: each lane is moved to position 0 of
// $tmp (ins S) and folded with a scalar fadds, preserving strict
// left-to-right FP ordering (no SIMD tree reduction).
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16248 
// Float multiply-reduction over 2 lanes: scalar fmuls with lane 0, then
// move lane 1 down (ins S) and multiply it in, preserving strict
// left-to-right FP ordering.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the trailing format comment said "add reduction4f" — a
  // copy-paste from reduce_add4F; this is a 2-lane multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16268 
// Float multiply-reduction over 4 lanes: each lane is moved to position 0
// of $tmp (ins S) and folded with scalar fmuls, preserving strict
// left-to-right FP ordering.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the trailing format comment said "add reduction4f" — this is
  // a multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16300 
// Double add-reduction over 2 lanes: scalar faddd with lane 0, then move
// lane 1 down (ins D) and add it, preserving FP ordering.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16320 
// Multiply-reduction of a 2-lane double vector into a scalar:
// dst = src1 * src2[0] * src2[1]. Lane 1 is copied into tmp with `ins`
// and folded in with a scalar fmuld.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    // dst = src1 * src2[0] (scalar fmuld reads lane 0 of src2)
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1]; dst *= tmp[0]
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16340 
16341 // ====================VECTOR ARITHMETIC=======================================
16342 
16343 // --------------------------------- ADD --------------------------------------
16344 
// Element-wise byte add in the low 64 bits of a SIMD register
// (handles both 4-byte and 8-byte vectors): dst = src1 + src2.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16359 
// Element-wise add of 16 byte lanes (full 128-bit register):
// dst = src1 + src2.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16373 
// Element-wise short (16-bit) add, 64-bit form; covers 2- and 4-lane
// short vectors: dst = src1 + src2.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16388 
// Element-wise add of 8 short (16-bit) lanes, 128-bit form:
// dst = src1 + src2.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16402 
// Element-wise add of 2 int (32-bit) lanes, 64-bit form:
// dst = src1 + src2.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16416 
// Element-wise add of 4 int (32-bit) lanes, 128-bit form:
// dst = src1 + src2.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16430 
// Element-wise add of 2 long (64-bit) lanes, 128-bit form:
// dst = src1 + src2 (D-sized lanes, hence T2D arrangement).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16444 
// Element-wise add of 2 float lanes, 64-bit form: dst = src1 + src2.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16458 
// Element-wise add of 4 float lanes, 128-bit form: dst = src1 + src2.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16472 
// Element-wise add of 2 double lanes, 128-bit form: dst = src1 + src2.
// Predicate added for consistency with the sibling 2D rules
// (vsub2D, vmul2D, vdiv2D all guard on length() == 2).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16485 
16486 // --------------------------------- SUB --------------------------------------
16487 
// Element-wise byte subtract, 64-bit form (4- or 8-byte vectors):
// dst = src1 - src2.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16502 
// Element-wise subtract of 16 byte lanes, 128-bit form:
// dst = src1 - src2.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16516 
// Element-wise short (16-bit) subtract, 64-bit form (2- or 4-lane):
// dst = src1 - src2.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16531 
// Element-wise subtract of 8 short (16-bit) lanes, 128-bit form:
// dst = src1 - src2.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16545 
// Element-wise subtract of 2 int (32-bit) lanes, 64-bit form:
// dst = src1 - src2.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16559 
// Element-wise subtract of 4 int (32-bit) lanes, 128-bit form:
// dst = src1 - src2.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16573 
// Element-wise subtract of 2 long (64-bit) lanes, 128-bit form:
// dst = src1 - src2 (D-sized lanes, hence T2D arrangement).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16587 
// Element-wise subtract of 2 float lanes, 64-bit form: dst = src1 - src2.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16601 
// Element-wise subtract of 4 float lanes, 128-bit form: dst = src1 - src2.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16615 
// Element-wise subtract of 2 double lanes, 128-bit form: dst = src1 - src2.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16629 
16630 // --------------------------------- MUL --------------------------------------
16631 
// Element-wise short (16-bit) multiply, 64-bit form (2- or 4-lane):
// dst = src1 * src2.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16646 
// Element-wise multiply of 8 short (16-bit) lanes, 128-bit form:
// dst = src1 * src2.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16660 
// Element-wise multiply of 2 int (32-bit) lanes, 64-bit form:
// dst = src1 * src2.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16674 
// Element-wise multiply of 4 int (32-bit) lanes, 128-bit form:
// dst = src1 * src2.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16688 
// Element-wise multiply of 2 float lanes, 64-bit form: dst = src1 * src2.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16702 
// Element-wise multiply of 4 float lanes, 128-bit form: dst = src1 * src2.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16716 
// Element-wise multiply of 2 double lanes, 128-bit form: dst = src1 * src2.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16730 
16731 // --------------------------------- MLA --------------------------------------
16732 
// Fused multiply-accumulate of short lanes, 64-bit form (2- or 4-lane):
// matches dst = dst + src1 * src2 and emits a single mla.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16747 
// Multiply-accumulate of 8 short lanes, 128-bit form:
// dst = dst + src1 * src2 in one mla.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16761 
// Multiply-accumulate of 2 int lanes, 64-bit form:
// dst = dst + src1 * src2 in one mla.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16775 
// Multiply-accumulate of 4 int lanes, 128-bit form:
// dst = dst + src1 * src2 in one mla.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16789 
16790 // dst + src1 * src2
// dst + src1 * src2
// FP fused multiply-add of 2 float lanes (FmaVF node, so only matched
// when UseFMA is on): dst = dst + src1 * src2 via fmla.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16803 
16804 // dst + src1 * src2
// dst + src1 * src2
// FP fused multiply-add of 4 float lanes (requires UseFMA):
// dst = dst + src1 * src2 via fmla.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16817 
16818 // dst + src1 * src2
// dst + src1 * src2
// FP fused multiply-add of 2 double lanes (requires UseFMA):
// dst = dst + src1 * src2 via fmla.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16831 
16832 // --------------------------------- MLS --------------------------------------
16833 
// Multiply-subtract of short lanes, 64-bit form (2- or 4-lane):
// matches dst = dst - src1 * src2 and emits a single mls.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16848 
// Multiply-subtract of 8 short lanes, 128-bit form:
// dst = dst - src1 * src2 in one mls.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16862 
// Multiply-subtract of 2 int lanes, 64-bit form:
// dst = dst - src1 * src2 in one mls.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16876 
// Multiply-subtract of 4 int lanes, 128-bit form:
// dst = dst - src1 * src2 in one mls.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16890 
16891 // dst - src1 * src2
// dst - src1 * src2
// FP fused multiply-subtract of 2 float lanes (requires UseFMA).
// Both negation forms of the FMA node map to the same fmls, since
// (-a)*b == a*(-b).
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16905 
16906 // dst - src1 * src2
// dst - src1 * src2
// FP fused multiply-subtract of 4 float lanes (requires UseFMA);
// both negation forms map to the same fmls.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16920 
16921 // dst - src1 * src2
// dst - src1 * src2
// FP fused multiply-subtract of 2 double lanes (requires UseFMA);
// both negation forms map to the same fmls.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16935 
16936 // --------------------------------- DIV --------------------------------------
16937 
// Element-wise divide of 2 float lanes, 64-bit form: dst = src1 / src2.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16951 
// Element-wise divide of 4 float lanes, 128-bit form: dst = src1 / src2.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16965 
// Element-wise divide of 2 double lanes, 128-bit form: dst = src1 / src2.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16979 
16980 // --------------------------------- SQRT -------------------------------------
16981 
// Element-wise square root of 2 double lanes, 128-bit form:
// dst = sqrt(src).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16993 
16994 // --------------------------------- ABS --------------------------------------
16995 
// Element-wise absolute value of 2 float lanes, 64-bit form:
// dst = |src|.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
17008 
// Element-wise absolute value of 4 float lanes, 128-bit form:
// dst = |src|.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17021 
// Element-wise absolute value of 2 double lanes, 128-bit form:
// dst = |src|.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17034 
17035 // --------------------------------- NEG --------------------------------------
17036 
// Element-wise negation of 2 float lanes, 64-bit form: dst = -src.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
17049 
// Element-wise negation of 4 float lanes, 128-bit form: dst = -src.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17062 
// Element-wise negation of 2 double lanes, 128-bit form: dst = -src.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17075 
17076 // --------------------------------- AND --------------------------------------
17077 
// Bitwise AND of vectors up to 8 bytes wide (predicate is on byte
// length, so this covers every lane type): dst = src1 & src2.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17092 
// Bitwise AND of 16-byte vectors (any lane type): dst = src1 & src2.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17106 
17107 // --------------------------------- OR ---------------------------------------
17108 
// Bitwise OR of vectors up to 8 bytes wide (predicate is on byte
// length, so this covers every lane type): dst = src1 | src2.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Format fixed from "and" to "orr": this rule matches OrV and the
  // encoding emits orr, matching vor16B's format.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17123 
// Bitwise OR of 16-byte vectors (any lane type): dst = src1 | src2.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17137 
17138 // --------------------------------- XOR --------------------------------------
17139 
// Bitwise XOR of vectors up to 8 bytes wide (predicate is on byte
// length, so this covers every lane type): dst = src1 ^ src2 via eor.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17154 
// Bitwise XOR of 16-byte vectors (any lane type): dst = src1 ^ src2
// via eor.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17168 
17169 // ------------------------------ Shift ---------------------------------------
17170 
// Materialize a left-shift count vector: broadcast the GP-register
// count into every byte lane of a 128-bit register with dup.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17179 
17180 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a right-shift count vector: broadcast the count into all
// byte lanes, then negate it, because AArch64 SIMD encodes right shifts
// as sshl/ushl with a negative shift amount (see comment above).
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17190 
// Variable shift of byte lanes, 64-bit form (4- or 8-byte vectors).
// Matches both LShiftVB and RShiftVB with one sshl: vshiftcntR negates
// the count, and sshl shifts right for negative per-lane counts.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17205 
// Variable shift of 16 byte lanes, 128-bit form. Covers both left and
// signed right shift via sshl (right shift uses a negated count from
// vshiftcntR).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17219 
// Variable logical (unsigned) right shift, bytes, 64-bit vector.  ushl
// with the negated count vector from vshiftcntR shifts right with zero fill.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17233 
// Variable logical (unsigned) right shift, bytes, 128-bit vector.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17246 
// Immediate left shift, bytes, 64-bit vector.  The constant is masked to
// the Java int shift range (& 31); a count of 8 or more moves every bit
// out of an 8-bit lane, so the result is zeroed with eor dst,src,src
// rather than encoding an out-of-range shl immediate.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17266 
// Immediate left shift, bytes, 128-bit vector (see vsll8B_imm for the
// count >= 8 zeroing rationale).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17285 
// Immediate arithmetic right shift, bytes, 64-bit vector.  Counts >= 8
// are clamped to 7 (an arithmetic shift saturates at lane_size-1, filling
// with sign bits).  The amount is then passed negated-and-masked
// (-sh & 7), the immediate form every sshr/ushr use in this file follows.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17301 
// Immediate arithmetic right shift, bytes, 128-bit vector (see
// vsra8B_imm for the clamp and the negated-immediate convention).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17316 
// Immediate logical right shift, bytes, 64-bit vector.  A count >= 8
// zero-fills the whole lane, so the result is produced with eor
// dst,src,src; otherwise ushr takes the negated-and-masked amount.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17336 
// Immediate logical right shift, bytes, 128-bit vector (see vsrl8B_imm).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17355 
// Variable shift, shorts, 64-bit vector.  RShiftVS also matches because
// the count vector from vshiftcntR is negated (sshl then shifts right).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17370 
// Variable shift, shorts, 128-bit vector (see vsll4S).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17384 
// Variable logical right shift, shorts, 64-bit vector.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17398 
// Variable logical right shift, shorts, 128-bit vector.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17411 
// Immediate left shift, shorts, 64-bit vector.  Counts >= 16 shift every
// bit out of a 16-bit lane, so the result is zeroed with eor dst,src,src
// (T8B clears the 64-bit register).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17431 
// Immediate left shift, shorts, 128-bit vector (see vsll4S_imm; T16B
// clears the full 128-bit register when the count is out of range).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17450 
// Immediate arithmetic right shift, shorts, 64-bit vector.  Counts >= 16
// are clamped to 15 (sign-fill saturates at lane_size-1); the amount is
// then passed negated-and-masked (-sh & 15), per this file's sshr/ushr
// immediate convention.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17466 
// Immediate arithmetic right shift, shorts, 128-bit vector (see
// vsra4S_imm).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17481 
// Immediate logical right shift, shorts, 64-bit vector.  Counts >= 16
// zero the lane, produced via eor dst,src,src; otherwise ushr with the
// negated-and-masked amount.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17501 
// Immediate logical right shift, shorts, 128-bit vector (see vsrl4S_imm).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shift count >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17520 
// Variable shift, ints, 64-bit vector.  RShiftVI also matches (count
// vector negated by vshiftcntR, so sshl shifts right).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17534 
// Variable shift, ints, 128-bit vector (see vsll2I).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17548 
// Variable logical right shift, ints, 64-bit vector.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17561 
// Variable logical right shift, ints, 128-bit vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17574 
// Immediate left shift, ints, 64-bit vector.  No out-of-range handling is
// needed: the lane is 32 bits wide and the count is masked to 0..31.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
17587 
// Immediate left shift, ints, 128-bit vector (see vsll2I_imm).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
17600 
// Immediate arithmetic right shift, ints, 64-bit vector.  The amount is
// passed negated-and-masked, per this file's sshr/ushr convention; no
// clamp is needed since the count is already masked to the 0..31 range
// of a 32-bit lane.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
17613 
// Immediate arithmetic right shift, ints, 128-bit vector (see vsra2I_imm).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
17626 
// Immediate logical right shift, ints, 64-bit vector (negated-and-masked
// amount, per this file's ushr convention).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
17639 
// Immediate logical right shift, ints, 128-bit vector (see vsrl2I_imm).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
17652 
// Variable shift, longs, 128-bit vector.  RShiftVL also matches (count
// vector negated by vshiftcntR, so sshl shifts right).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17666 
// Variable logical right shift, longs, 128-bit vector.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17679 
// Immediate left shift, longs, 128-bit vector.  The count is masked to
// the Java long shift range (& 63), which also covers the 64-bit lane.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
17692 
// Immediate arithmetic right shift, longs, 128-bit vector
// (negated-and-masked amount, per this file's sshr convention).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
17705 
// Immediate logical right shift, longs, 128-bit vector (see vsra2L_imm
// for the immediate convention).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
17718 
17719 //----------PEEPHOLE RULES-----------------------------------------------------
17720 // These must follow all instruction definitions as they use the names
17721 // defined in the instructions definitions.
17722 //
17723 // peepmatch ( root_instr_name [preceding_instruction]* );
17724 //
17725 // peepconstraint %{
17726 // (instruction_number.operand_name relational_op instruction_number.operand_name
17727 //  [, ...] );
17728 // // instruction numbers are zero-based using left to right order in peepmatch
17729 //
17730 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17731 // // provide an instruction_number.operand_name for each operand that appears
17732 // // in the replacement instruction's match rule
17733 //
17734 // ---------VM FLAGS---------------------------------------------------------
17735 //
17736 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17737 //
17738 // Each peephole rule is given an identifying number starting with zero and
17739 // increasing by one in the order seen by the parser.  An individual peephole
17740 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17741 // on the command-line.
17742 //
17743 // ---------CURRENT LIMITATIONS----------------------------------------------
17744 //
17745 // Only match adjacent instructions in same basic block
17746 // Only equality constraints
17747 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17748 // Only one replacement instruction
17749 //
17750 // ---------EXAMPLE----------------------------------------------------------
17751 //
17752 // // pertinent parts of existing instructions in architecture description
17753 // instruct movI(iRegINoSp dst, iRegI src)
17754 // %{
17755 //   match(Set dst (CopyI src));
17756 // %}
17757 //
17758 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17759 // %{
17760 //   match(Set dst (AddI dst src));
17761 //   effect(KILL cr);
17762 // %}
17763 //
17764 // // Change (inc mov) to lea
17765 // peephole %{
//   // increment preceded by register-register move
17767 //   peepmatch ( incI_iReg movI );
17768 //   // require that the destination register of the increment
17769 //   // match the destination register of the move
17770 //   peepconstraint ( 0.dst == 1.dst );
17771 //   // construct a replacement instruction that sets
17772 //   // the destination to ( move's source register + one )
17773 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17774 // %}
17775 //
17776 
17777 // Implementation no longer uses movX instructions since
17778 // machine-independent system no longer uses CopyX nodes.
17779 //
17780 // peephole
17781 // %{
17782 //   peepmatch (incI_iReg movI);
17783 //   peepconstraint (0.dst == 1.dst);
17784 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17785 // %}
17786 
17787 // peephole
17788 // %{
17789 //   peepmatch (decI_iReg movI);
17790 //   peepconstraint (0.dst == 1.dst);
17791 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17792 // %}
17793 
17794 // peephole
17795 // %{
17796 //   peepmatch (addI_iReg_imm movI);
17797 //   peepconstraint (0.dst == 1.dst);
17798 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17799 // %}
17800 
17801 // peephole
17802 // %{
17803 //   peepmatch (incL_iReg movL);
17804 //   peepconstraint (0.dst == 1.dst);
17805 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17806 // %}
17807 
17808 // peephole
17809 // %{
17810 //   peepmatch (decL_iReg movL);
17811 //   peepconstraint (0.dst == 1.dst);
17812 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17813 // %}
17814 
17815 // peephole
17816 // %{
17817 //   peepmatch (addL_iReg_imm movL);
17818 //   peepconstraint (0.dst == 1.dst);
17819 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17820 // %}
17821 
17822 // peephole
17823 // %{
17824 //   peepmatch (addP_iReg_imm movP);
17825 //   peepconstraint (0.dst == 1.dst);
17826 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17827 // %}
17828 
17829 // // Change load of spilled value to only a spill
17830 // instruct storeI(memory mem, iRegI src)
17831 // %{
17832 //   match(Set mem (StoreI mem src));
17833 // %}
17834 //
17835 // instruct loadI(iRegINoSp dst, memory mem)
17836 // %{
17837 //   match(Set dst (LoadI mem));
17838 // %}
17839 //
17840 
17841 //----------SMARTSPILL RULES---------------------------------------------------
17842 // These must follow all instruction definitions as they use the names
17843 // defined in the instructions definitions.
17844 
17845 // Local Variables:
17846 // mode: c++
17847 // End: