//
// Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// AArch64 Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// Follow the C1 compiler in making registers
//
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r32 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// as regards Java usage. We don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
//

// General Registers

reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers

// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can hold a vector of up to 4 single-precision or 2 double-precision
// floating-point values. We currently only use the first float or
// double element of the vector.

// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));

// ----------------------------
// Special Registers
// ----------------------------

// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).

reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());


// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

alloc_class chunk2(RFLAGS);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non-special integer registers
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});

// Class for all non-special long integer registers
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Class for all float registers
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 64bit vector registers
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
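
// Register classes are consumed by operand definitions later in this file,
// which bind a class to a matcher type via ALLOC_IN_RC. A hedged sketch of
// the pattern (in the spirit of the operands defined below, details elided):
//
//   operand iRegI()
//   %{
//     constraint(ALLOC_IN_RC(any_reg32)); // allocate from the class above
//     match(RegI);
//     op_cost(0);
//     format %{ %}
//     interface(REG_INTER);
//   %}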

%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//

// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost appears to be a way of saying don't do
// something.

definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
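
// As an illustration of where these names are consumed (a hedged sketch in
// the spirit of the integer ALU rules defined further down this file, not a
// new rule), a typical register-register add prices itself with ins_cost
// using the definitions above:
//
//   instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
//     match(Set dst (AddI src1 src2));
//     ins_cost(INSN_COST);        // plain register op: the cheapest tier
//     format %{ "addw  $dst, $src1, $src2" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg),
//               as_Register($src1$$reg),
//               as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}
//
// Memory and branch rules use multiples of INSN_COST or BRANCH_COST instead,
// so the matcher prefers register forms when several rules match.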


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description

source_hpp %{

#include "gc/shared/cardTableModRefBS.hpp"

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};

  // graph traversal helpers

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
%}

source %{

  // Optimization of volatile gets and puts
  // --------------------------------------
  //
  // AArch64 has ldar<x> and stlr<x> instructions which we can safely
  // use to implement volatile reads and writes. For a volatile read
  // we simply need
  //
  //   ldar<x>
  //
  // and for a volatile write we need
  //
  //   stlr<x>
  //
  // Alternatively, we can implement them by pairing a normal
  // load/store with a memory barrier. For a volatile read we need
  //
  //   ldr<x>
  //   dmb ishld
  //
  // for a volatile write
  //
  //   dmb ish
  //   str<x>
  //   dmb ish
  //
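  // As a concrete (and deliberately simplified) sketch of the ldar<x>
  // alternative, when the relevant predicate fires the matcher's encoding
  // for an acquiring 32-bit load reduces to a single MacroAssembler call --
  // the real enc_class additionally materialises any non-zero offset into a
  // scratch register first:
  //
  //   Register dst  = as_Register($dst$$reg);
  //   Register base = as_Register($mem$$base);
  //   __ ldarw(dst, base);   // acquiring load; subsumes ldr + dmb ishld
  //
  // with the releasing store as the mirror image using stlrw.
  //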
  // We can also use ldaxr and stlxr to implement compare-and-swap
  // (CAS) sequences. These are normally translated to an instruction
  // sequence like the following
  //
  //   dmb      ish
  // retry:
  //   ldxr<x>   rval raddr
  //   cmp       rval rold
  //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
  //   cbnz      rval retry
  // done:
  //   cset      r0, eq
  //   dmb ishld
  //
  // Note that the exclusive store is already using an stlxr
  // instruction. That is required to ensure visibility to other
  // threads of the exclusive write (assuming it succeeds) before that
  // of any subsequent writes.
  //
  // The following instruction sequence is an improvement on the above
  //
  // retry:
  //   ldaxr<x>  rval raddr
  //   cmp       rval rold
  //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
  //   cbnz      rval retry
  // done:
  //   cset      r0, eq
  //
  // We don't need the leading dmb ish since the stlxr guarantees
  // visibility of prior writes in the case that the swap is
  // successful. Crucially we don't have to worry about the case where
  // the swap is not successful since no valid program should be
  // relying on visibility of prior changes by the attempting thread
  // in the case where the CAS fails.
  //
  // Similarly, we don't need the trailing dmb ishld if we substitute
  // an ldaxr instruction since that will provide all the guarantees we
  // require regarding observation of changes made by other threads
  // before any change to the CAS address observed by the load.
  //
  // In order to generate the desired instruction sequence we need to
  // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads,
  // writes or CAS operations and ii) do not occur through any other
  // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
  // sequences to the desired machine code sequences. Selection of the
  // alternative rules can be implemented by predicates which identify
  // the relevant node sequences.
  //
  // The ideal graph generator translates a volatile read to the node
  // sequence
  //
  //   LoadX[mo_acquire]
  //   MemBarAcquire
  //
  // As a special case when using the compressed oops optimization we
  // may also see this variant
  //
  //   LoadN[mo_acquire]
  //   DecodeN
  //   MemBarAcquire
  //
  // A volatile write is translated to the node sequence
  //
  //   MemBarRelease
  //   StoreX[mo_release] {CardMark}-optional
  //   MemBarVolatile
  //
  // n.b. the above node patterns are generated with a strict
  // 'signature' configuration of input and output dependencies (see
  // the predicates below for exact details). The card mark may be as
  // simple as a few extra nodes or, in a few GC configurations, may
  // include more complex control flow between the leading and
  // trailing memory barriers. However, whatever the card mark
  // configuration these signatures are unique to translated volatile
  // reads/stores -- they will not appear as a result of any other
  // bytecode translation or inlining nor as a consequence of
  // optimizing transforms.
  //
  // We also want to catch inlined unsafe volatile gets and puts and
  // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/stlr<x> and dmb instructions.
  //
  // Inlined unsafe volatile puts manifest as a minor variant of the
  // normal volatile put node sequence containing an extra cpuorder
  // membar
  //
  //   MemBarRelease
  //   MemBarCPUOrder
  //   StoreX[mo_release] {CardMark}-optional
  //   MemBarVolatile
  //
  // n.b. as an aside, the cpuorder membar is not itself subject to
  // matching and translation by adlc rules.  However, the rule
  // predicates need to detect its presence in order to correctly
  // select the desired adlc rules.
  //
  // Inlined unsafe volatile gets manifest as a somewhat different
  // node sequence to a normal volatile get
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // In this case the acquire membar does not directly depend on the
  // load. However, we can be sure that the load is generated from an
  // inlined unsafe volatile get if we see it dependent on this unique
  // sequence of membar nodes. Similarly, given an acquire membar we
  // can know that it was added because of an inlined unsafe volatile
  // get if it is fed and feeds a cpuorder membar and if its feed
  // membar also feeds an acquiring load.
  //
  // Finally an inlined (Unsafe) CAS operation is translated to the
  // following ideal graph
  //
  //   MemBarRelease
  //   MemBarCPUOrder
  //   CompareAndSwapX {CardMark}-optional
  //   MemBarCPUOrder
  //   MemBarAcquire
  //
  // So, where we can identify these volatile read and write
  // signatures we can choose to plant either of the above two code
  // sequences. For a volatile read we can simply plant a normal
  // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
  // also choose to inhibit translation of the MemBarAcquire and
  // inhibit planting of the ldr<x>, instead planting an ldar<x>.
  //
  // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
  // normal str<x> and then a dmb ish for the MemBarVolatile.
  // Alternatively, we can inhibit translation of the MemBarRelease
  // and MemBarVolatile and instead plant a simple stlr<x>
  // instruction.
  //
  // When we recognise a CAS signature we can choose to plant a dmb
  // ish as a translation for the MemBarRelease, the conventional
  // macro-instruction sequence for the CompareAndSwap node (which
  // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
  // Alternatively, we can elide generation of the dmb instructions
  // and plant the alternative CompareAndSwap macro-instruction
  // sequence (which uses ldaxr<x>).
  //
  // Of course, the above only applies when we see these signature
  // configurations. We still want to plant dmb instructions in any
  // other cases where we may see a MemBarAcquire, MemBarRelease or
  // MemBarVolatile. For example, at the end of a constructor which
  // writes final/volatile fields we will see a MemBarRelease
  // instruction and this needs a 'dmb ish' lest we risk the
  // constructed object being visible without making the
  // final/volatile field writes visible.
  //
  // n.b. the translation rules below which rely on detection of the
  // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
  // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
  // and translate acquire, release and volatile membars to the
  // relevant dmb instructions.
  //
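  // As an illustration of how these predicates are wired up, the load and
  // store rules further down this file come in plain and acquiring/releasing
  // variants gated on the predicates declared above; a hedged sketch of the
  // shape (operand and encoding details elided):
  //
  //   instruct loadI(iRegINoSp dst, memory mem) %{
  //     match(Set dst (LoadI mem));
  //     predicate(!needs_acquiring_load(n));
  //     ins_cost(4 * INSN_COST);
  //     ... emits ldrw ...
  //   %}
  //
  //   instruct loadI_volatile(iRegINoSp dst, ...) %{
  //     match(Set dst (LoadI mem));
  //     predicate(needs_acquiring_load(n));
  //     ins_cost(VOLATILE_REF_COST);
  //     ... emits ldarw ...
  //   %}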

  // graph traversal helpers used for volatile put/get and CAS
  // optimization

  // 1) general purpose helpers

  // if node n is linked to a parent MemBarNode by intervening Control
  // and Memory ProjNodes, return the MemBarNode; otherwise return
  // NULL.
  //
  // n may only be a Load or a MemBar.

  MemBarNode *parent_membar(const Node *n)
  {
    Node *ctl = NULL;
    Node *mem = NULL;
    Node *membar = NULL;

    if (n->is_Load()) {
      ctl = n->lookup(LoadNode::Control);
      mem = n->lookup(LoadNode::Memory);
    } else if (n->is_MemBar()) {
      ctl = n->lookup(TypeFunc::Control);
      mem = n->lookup(TypeFunc::Memory);
    } else {
      return NULL;
    }

    if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
      return NULL;
    }

    membar = ctl->lookup(0);

    if (!membar || !membar->is_MemBar()) {
      return NULL;
    }

    if (mem->lookup(0) != membar) {
      return NULL;
    }

    return membar->as_MemBar();
  }

  // if n is linked to a child MemBarNode by intervening Control and
  // Memory ProjNodes, return the MemBarNode; otherwise return NULL.

  MemBarNode *child_membar(const MemBarNode *n)
  {
    ProjNode *ctl = n->proj_out(TypeFunc::Control);
    ProjNode *mem = n->proj_out(TypeFunc::Memory);

    // MemBar needs to have both a Ctl and Mem projection
    if (! ctl || ! mem)
      return NULL;

    MemBarNode *child = NULL;
    Node *x;

    for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
      x = ctl->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x->is_MemBar()) {
        child = x->as_MemBar();
        break;
      }
    }

    if (child == NULL) {
      return NULL;
    }

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x == child) {
        return child;
      }
    }
    return NULL;
  }

  // helper predicate used to filter candidates for a leading memory
  // barrier
  //
  // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
  // whose Ctl and Mem feeds come from a MemBarRelease, otherwise false

  bool leading_membar(const MemBarNode *barrier)
  {
    int opcode = barrier->Opcode();
    // if this is a release membar we are ok
    if (opcode == Op_MemBarRelease) {
      return true;
    }
    // if it's a cpuorder membar . . .
    if (opcode != Op_MemBarCPUOrder) {
      return false;
    }
    // then the parent has to be a release membar
    MemBarNode *parent = parent_membar(barrier);
    if (!parent) {
      return false;
    }
    opcode = parent->Opcode();
    return opcode == Op_MemBarRelease;
  }

  // 2) card mark detection helper

  // helper predicate which can be used to detect a volatile membar
  // introduced as part of a conditional card mark sequence either by
  // G1 or by CMS when UseCondCardMark is true.
  //
  // membar can be definitively determined to be part of a card mark
  // sequence if and only if all the following hold
  //
  // i) it is a MemBarVolatile
  //
  // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
  // true
  //
  // iii) the node's Mem projection feeds a StoreCM node.

  bool is_card_mark_membar(const MemBarNode *barrier)
  {
    if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
      return false;
    }

    if (barrier->Opcode() != Op_MemBarVolatile) {
      return false;
    }

    ProjNode *mem = barrier->proj_out(TypeFunc::Memory);

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
      Node *y = mem->fast_out(i);
      if (y->Opcode() == Op_StoreCM) {
        return true;
      }
    }

    return false;
  }


1410   // 3) helper predicates to traverse volatile put or CAS graphs which
1411   // may contain GC barrier subgraphs
1412 
1413   // Preamble
1414   // --------
1415   //
1416   // for volatile writes we can omit generating barriers and employ a
1417   // releasing store when we see a node sequence sequence with a
1418   // leading MemBarRelease and a trailing MemBarVolatile as follows
1419   //
1420   //   MemBarRelease
1421   //  {      ||      } -- optional
1422   //  {MemBarCPUOrder}
1423   //         ||     \\
1424   //         ||     StoreX[mo_release]
1425   //         | \     /
1426   //         | MergeMem
1427   //         | /
1428   //   MemBarVolatile
1429   //
1430   // where
1431   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1432   //  | \ and / indicate further routing of the Ctl and Mem feeds
1433   //
1434   // this is the graph we see for non-object stores. however, for a
1435   // volatile Object store (StoreN/P) we may see other nodes below the
1436   // leading membar because of the need for a GC pre- or post-write
1437   // barrier.
1438   //
1439   // with most GC configurations we with see this simple variant which
1440   // includes a post-write barrier card mark.
1441   //
1442   //   MemBarRelease______________________________
1443   //         ||    \\               Ctl \        \\
1444   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1445   //         | \     /                       . . .  /
1446   //         | MergeMem
1447   //         | /
1448   //         ||      /
1449   //   MemBarVolatile
1450   //
1451   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1452   // the object address to an int used to compute the card offset) and
1453   // Ctl+Mem to a StoreB node (which does the actual card mark).
1454   //
1455   // n.b. a StoreCM node will only appear in this configuration when
1456   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1457   // because it implies a requirement to order visibility of the card
1458   // mark (StoreCM) relative to the object put (StoreP/N) using a
1459   // StoreStore memory barrier (arguably this ought to be represented
1460   // explicitly in the ideal graph but that is not how it works). This
1461   // ordering is required for both non-volatile and volatile
1462   // puts. Normally that means we need to translate a StoreCM using
1463   // the sequence
1464   //
1465   //   dmb ishst
1466   //   stlrb
1467   //
1468   // However, in the case of a volatile put if we can recognise this
1469   // configuration and plant an stlr for the object write then we can
1470   // omit the dmb and just plant an strb since visibility of the stlr
1471   // is ordered before visibility of subsequent stores. StoreCM nodes
1472   // also arise when using G1 or using CMS with conditional card
1473   // marking. In these cases (as we shall see) we don't need to insert
1474   // the dmb when translating StoreCM because there is already an
1475   // intervening StoreLoad barrier between it and the StoreP/N.
1476   //
1477   // It is also possible to perform the card mark conditionally on it
1478   // currently being unmarked in which case the volatile put graph
1479   // will look slightly different
1480   //
1481   //   MemBarRelease____________________________________________
1482   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1483   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1484   //         | \     /                              \            |
1485   //         | MergeMem                            . . .      StoreB
1486   //         | /                                                /
1487   //         ||     /
1488   //   MemBarVolatile
1489   //
1490   // It is worth noting at this stage that both the above
1491   // configurations can be uniquely identified by checking that the
1492   // memory flow includes the following subgraph:
1493   //
1494   //   MemBarRelease
1495   //  {MemBarCPUOrder}
1496   //          |  \      . . .
1497   //          |  StoreX[mo_release]  . . .
1498   //          |   /
1499   //         MergeMem
1500   //          |
1501   //   MemBarVolatile
1502   //
1503   // This is referred to as a *normal* subgraph. It can easily be
1504   // detected starting from any candidate MemBarRelease,
1505   // StoreX[mo_release] or MemBarVolatile.
1506   //
1507   // A simple variation on this normal case occurs for an unsafe CAS
1508   // operation. The basic graph for a non-object CAS is
1509   //
1510   //   MemBarRelease
1511   //         ||
1512   //   MemBarCPUOrder
1513   //         ||     \\   . . .
1514   //         ||     CompareAndSwapX
1515   //         ||       |
1516   //         ||     SCMemProj
1517   //         | \     /
1518   //         | MergeMem
1519   //         | /
1520   //   MemBarCPUOrder
1521   //         ||
1522   //   MemBarAcquire
1523   //
1524   // The same basic variations on this arrangement (mutatis mutandis)
1525   // occur when a card mark is introduced. i.e. we se the same basic
1526   // shape but the StoreP/N is replaced with CompareAndSawpP/N and the
1527   // tail of the graph is a pair comprising a MemBarCPUOrder +
1528   // MemBarAcquire.
1529   //
1530   // So, in the case of a CAS the normal graph has the variant form
1531   //
1532   //   MemBarRelease
1533   //   MemBarCPUOrder
1534   //          |   \      . . .
1535   //          |  CompareAndSwapX  . . .
1536   //          |    |
1537   //          |   SCMemProj
1538   //          |   /  . . .
1539   //         MergeMem
1540   //          |
1541   //   MemBarCPUOrder
1542   //   MemBarAcquire
1543   //
1544   // This graph can also easily be detected starting from any
1545   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1546   //
  // the code below uses two helper predicates, leading_to_normal and
  // normal_to_leading, to identify these normal graphs, one validating
1549   // the layout starting from the top membar and searching down and
1550   // the other validating the layout starting from the lower membar
1551   // and searching up.
1552   //
1553   // There are two special case GC configurations when a normal graph
1554   // may not be generated: when using G1 (which always employs a
1555   // conditional card mark); and when using CMS with conditional card
1556   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
  // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1560   // object put and the corresponding conditional card mark. CMS
1561   // employs a post-write GC barrier while G1 employs both a pre- and
1562   // post-write GC barrier. Of course the extra nodes may be absent --
1563   // they are only inserted for object puts. This significantly
1564   // complicates the task of identifying whether a MemBarRelease,
1565   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1566   // when using these GC configurations (see below). It adds similar
1567   // complexity to the task of identifying whether a MemBarRelease,
1568   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1569   //
1570   // In both cases the post-write subtree includes an auxiliary
1571   // MemBarVolatile (StoreLoad barrier) separating the object put and
1572   // the read of the corresponding card. This poses two additional
1573   // problems.
1574   //
1575   // Firstly, a card mark MemBarVolatile needs to be distinguished
1576   // from a normal trailing MemBarVolatile. Resolving this first
1577   // problem is straightforward: a card mark MemBarVolatile always
1578   // projects a Mem feed to a StoreCM node and that is a unique marker
1579   //
1580   //      MemBarVolatile (card mark)
1581   //       C |    \     . . .
1582   //         |   StoreCM   . . .
1583   //       . . .
1584   //
  // The second problem is how the code generator should translate the
  // card mark barrier. It always needs to be translated to a "dmb
  // ish" instruction whether or not it occurs as part of a volatile
  // put. A StoreLoad barrier is needed after the object put to ensure
1589   // i) visibility to GC threads of the object put and ii) visibility
1590   // to the mutator thread of any card clearing write by a GC
1591   // thread. Clearly a normal store (str) will not guarantee this
1592   // ordering but neither will a releasing store (stlr). The latter
1593   // guarantees that the object put is visible but does not guarantee
1594   // that writes by other threads have also been observed.
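  //
  // In other words the card mark region of a volatile put has to
  // translate to something like the following sketch (operands
  // elided, conditional card marking assumed)
  //
  //   stlr <val>, [<field>]      // releasing object put
  //   dmb ish                    // StoreLoad barrier (card mark membar)
  //   ldrb <w>, [<card>]         // re-read the card
  //   . . .                      // test and, only if need be,
  //   strb <w>, [<card>]         // write the card (StoreCM)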
1595   //
1596   // So, returning to the task of translating the object put and the
  // leading/trailing membar nodes: what do the non-normal node graphs
  // look like for these 2 special cases? and how can we determine the
1599   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1600   // in both normal and non-normal cases?
1601   //
1602   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1604   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1605   // intervening StoreLoad barrier (MemBarVolatile).
1606   //
1607   // So, with CMS we may see a node graph for a volatile object store
1608   // which looks like this
1609   //
1610   //   MemBarRelease
1611   //   MemBarCPUOrder_(leading)__________________
1612   //     C |    M \       \\                   C \
1613   //       |       \    StoreN/P[mo_release]  CastP2X
1614   //       |    Bot \    /
1615   //       |       MergeMem
1616   //       |         /
1617   //      MemBarVolatile (card mark)
1618   //     C |  ||    M |
1619   //       | LoadB    |
1620   //       |   |      |
1621   //       | Cmp      |\
1622   //       | /        | \
1623   //       If         |  \
1624   //       | \        |   \
1625   // IfFalse  IfTrue  |    \
1626   //       \     / \  |     \
1627   //        \   / StoreCM    |
1628   //         \ /      |      |
1629   //        Region   . . .   |
1630   //          | \           /
1631   //          |  . . .  \  / Bot
1632   //          |       MergeMem
1633   //          |          |
1634   //        MemBarVolatile (trailing)
1635   //
1636   // The first MergeMem merges the AliasIdxBot Mem slice from the
1637   // leading membar and the oopptr Mem slice from the Store into the
1638   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1639   // Mem slice from the card mark membar and the AliasIdxRaw slice
1640   // from the StoreCM into the trailing membar (n.b. the latter
1641   // proceeds via a Phi associated with the If region).
1642   //
1643   // The graph for a CAS varies slightly, the obvious difference being
1644   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1645   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1646   // MemBarAcquire pair. The other important difference is that the
1647   // CompareAndSwap node's SCMemProj is not merged into the card mark
1648   // membar - it still feeds the trailing MergeMem. This also means
1649   // that the card mark membar receives its Mem feed directly from the
1650   // leading membar rather than via a MergeMem.
1651   //
1652   //   MemBarRelease
1653   //   MemBarCPUOrder__(leading)_________________________
1654   //       ||                       \\                 C \
1655   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1656   //     C |  ||    M |              |
1657   //       | LoadB    |       ______/|
1658   //       |   |      |      /       |
1659   //       | Cmp      |     /      SCMemProj
1660   //       | /        |    /         |
1661   //       If         |   /         /
1662   //       | \        |  /         /
1663   // IfFalse  IfTrue  | /         /
1664   //       \     / \  |/ prec    /
1665   //        \   / StoreCM       /
1666   //         \ /      |        /
1667   //        Region   . . .    /
1668   //          | \            /
1669   //          |  . . .  \   / Bot
1670   //          |       MergeMem
1671   //          |          |
1672   //        MemBarCPUOrder
1673   //        MemBarAcquire (trailing)
1674   //
1675   // This has a slightly different memory subgraph to the one seen
1676   // previously but the core of it is the same as for the CAS normal
  // subgraph
1678   //
1679   //   MemBarRelease
1680   //   MemBarCPUOrder____
1681   //      ||             \      . . .
1682   //   MemBarVolatile  CompareAndSwapX  . . .
1683   //      |  \            |
1684   //        . . .   SCMemProj
1685   //          |     /  . . .
1686   //         MergeMem
1687   //          |
1688   //   MemBarCPUOrder
1689   //   MemBarAcquire
1690   //
1691   //
1692   // G1 is quite a lot more complicated. The nodes inserted on behalf
1693   // of G1 may comprise: a pre-write graph which adds the old value to
1694   // the SATB queue; the releasing store itself; and, finally, a
1695   // post-write graph which performs a card mark.
1696   //
1697   // The pre-write graph may be omitted, but only when the put is
1698   // writing to a newly allocated (young gen) object and then only if
1699   // there is a direct memory chain to the Initialize node for the
1700   // object allocation. This will not happen for a volatile put since
1701   // any memory chain passes through the leading membar.
1702   //
1703   // The pre-write graph includes a series of 3 If tests. The outermost
1704   // If tests whether SATB is enabled (no else case). The next If tests
1705   // whether the old value is non-NULL (no else case). The third tests
1706   // whether the SATB queue index is > 0, if so updating the queue. The
1707   // else case for this third If calls out to the runtime to allocate a
1708   // new queue buffer.
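  //
  // In pseudocode the pre-write barrier logic is roughly as follows
  // (a sketch of the tests, not of the node graph; names are
  // illustrative)
  //
  //   if (marking_active) {             // outermost If
  //     pre_val = *field;               // load of the old value
  //     if (pre_val != NULL) {          // second If
  //       if (index > 0) {              // third If
  //         queue[--index] = pre_val;   // update the SATB queue
  //       } else {
  //         runtime_call(pre_val);      // allocate a new queue buffer
  //       }
  //     }
  //   }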
1709   //
1710   // So with G1 the pre-write and releasing store subgraph looks like
1711   // this (the nested Ifs are omitted).
1712   //
1713   //  MemBarRelease (leading)____________
1714   //     C |  ||  M \   M \    M \  M \ . . .
1715   //       | LoadB   \  LoadL  LoadN   \
1716   //       | /        \                 \
1717   //       If         |\                 \
1718   //       | \        | \                 \
1719   //  IfFalse  IfTrue |  \                 \
1720   //       |     |    |   \                 |
1721   //       |     If   |   /\                |
1722   //       |     |          \               |
1723   //       |                 \              |
1724   //       |    . . .         \             |
1725   //       | /       | /       |            |
1726   //      Region  Phi[M]       |            |
1727   //       | \       |         |            |
1728   //       |  \_____ | ___     |            |
1729   //     C | C \     |   C \ M |            |
1730   //       | CastP2X | StoreN/P[mo_release] |
1731   //       |         |         |            |
1732   //     C |       M |       M |          M |
1733   //        \        |         |           /
1734   //                  . . .
1735   //          (post write subtree elided)
1736   //                    . . .
1737   //             C \         M /
1738   //         MemBarVolatile (trailing)
1739   //
1740   // n.b. the LoadB in this subgraph is not the card read -- it's a
1741   // read of the SATB queue active flag.
1742   //
1743   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1745   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1746   //
1747   // The G1 post-write subtree is also optional, this time when the
1748   // new value being written is either null or can be identified as a
1749   // newly allocated (young gen) object with no intervening control
1750   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1753   // trailing membar as per the normal subgraph. So, the only special
1754   // case which arises is when the post-write subgraph is generated.
1755   //
1756   // The kernel of the post-write G1 subgraph is the card mark itself
1757   // which includes a card mark memory barrier (MemBarVolatile), a
1758   // card test (LoadB), and a conditional update (If feeding a
1759   // StoreCM). These nodes are surrounded by a series of nested Ifs
1760   // which try to avoid doing the card mark. The top level If skips if
1761   // the object reference does not cross regions (i.e. it tests if
1762   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1763   // need not be recorded. The next If, which skips on a NULL value,
1764   // may be absent (it is not generated if the type of value is >=
1765   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1766   // checking if card_val != young).  n.b. although this test requires
1767   // a pre-read of the card it can safely be done before the StoreLoad
1768   // barrier. However that does not bypass the need to reread the card
1769   // after the barrier.
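  //
  // In pseudocode the post-write barrier tests are roughly as
  // follows (again a sketch of the logic rather than of the node
  // graph)
  //
  //   if (((adr ^ val) >> log2(regsize)) != 0) { // cross-region?
  //     if (val != NULL) {                       // If may be absent
  //       if (*card != young) {                  // pre-read of card
  //         dmb ish                              // StoreLoad barrier
  //         if (*card != dirty) {                // re-read of card
  //           *card = dirty;                     // StoreCM
  //           . . .                              // enqueue the card
  //         }
  //       }
  //     }
  //   }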
1770   //
1771   //                (pre-write subtree elided)
1772   //        . . .                  . . .    . . .  . . .
1773   //        C |                    M |     M |    M |
1774   //       Region                  Phi[M] StoreN    |
1775   //          |                     / \      |      |
1776   //         / \_______            /   \     |      |
1777   //      C / C \      . . .            \    |      |
1778   //       If   CastP2X . . .            |   |      |
1779   //       / \                           |   |      |
1780   //      /   \                          |   |      |
1781   // IfFalse IfTrue                      |   |      |
1782   //   |       |                         |   |     /|
1783   //   |       If                        |   |    / |
1784   //   |      / \                        |   |   /  |
1785   //   |     /   \                        \  |  /   |
1786   //   | IfFalse IfTrue                   MergeMem  |
1787   //   |  . . .    / \                       /      |
1788   //   |          /   \                     /       |
1789   //   |     IfFalse IfTrue                /        |
1790   //   |      . . .    |                  /         |
1791   //   |               If                /          |
1792   //   |               / \              /           |
1793   //   |              /   \            /            |
1794   //   |         IfFalse IfTrue       /             |
1795   //   |           . . .   |         /              |
1796   //   |                    \       /               |
1797   //   |                     \     /                |
1798   //   |             MemBarVolatile__(card mark)    |
1799   //   |                ||   C |  M \  M \          |
1800   //   |               LoadB   If    |    |         |
1801   //   |                      / \    |    |         |
1802   //   |                     . . .   |    |         |
1803   //   |                          \  |    |        /
1804   //   |                        StoreCM   |       /
1805   //   |                          . . .   |      /
1806   //   |                        _________/      /
1807   //   |                       /  _____________/
1808   //   |   . . .       . . .  |  /            /
1809   //   |    |                 | /   _________/
1810   //   |    |               Phi[M] /        /
1811   //   |    |                 |   /        /
1812   //   |    |                 |  /        /
1813   //   |  Region  . . .     Phi[M]  _____/
1814   //   |    /                 |    /
1815   //   |                      |   /
1816   //   | . . .   . . .        |  /
1817   //   | /                    | /
1818   // Region           |  |  Phi[M]
1819   //   |              |  |  / Bot
1820   //    \            MergeMem
1821   //     \            /
1822   //     MemBarVolatile
1823   //
1824   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1825   // from the leading membar and the oopptr Mem slice from the Store
1826   // into the card mark membar i.e. the memory flow to the card mark
1827   // membar still looks like a normal graph.
1828   //
1829   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1830   // Mem slices (from the StoreCM and other card mark queue stores).
1831   // However in this case the AliasIdxBot Mem slice does not come
1832   // direct from the card mark membar. It is merged through a series
1833   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1834   // from the leading membar with the Mem feed from the card mark
1835   // membar. Each Phi corresponds to one of the Ifs which may skip
1836   // around the card mark membar. So when the If implementing the NULL
1837   // value check has been elided the total number of Phis is 2
1838   // otherwise it is 3.
1839   //
1840   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1844   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1845   // Mem feed from the CompareAndSwapP/N includes a precedence
1846   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1847   // trailing membar. So, as before the configuration includes the
1848   // normal CAS graph as a subgraph of the memory flow.
1849   //
1850   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1852   // its child membar, either a volatile put graph (including a
1853   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1854   // When that child is not a card mark membar then it marks the end
1855   // of the volatile put or CAS subgraph. If the child is a card mark
1856   // membar then the normal subgraph will form part of a volatile put
1857   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1858   // to a trailing barrier via a MergeMem. That feed is either direct
1859   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1860   // memory flow (for G1).
1861   //
1862   // The predicates controlling generation of instructions for store
1863   // and barrier nodes employ a few simple helper functions (described
1864   // below) which identify the presence or absence of all these
1865   // subgraph configurations and provide a means of traversing from
1866   // one node in the subgraph to another.
1867 
1868   // is_CAS(int opcode)
1869   //
1870   // return true if opcode is one of the possible CompareAndSwapX
1871   // values otherwise false.
1872 
1873   bool is_CAS(int opcode)
1874   {
1875     return (opcode == Op_CompareAndSwapI ||
1876             opcode == Op_CompareAndSwapL ||
1877             opcode == Op_CompareAndSwapN ||
1878             opcode == Op_CompareAndSwapP);
1879   }
1880 
1881   // leading_to_normal
1882   //
  // graph traversal helper which detects the normal case Mem feed
  // from a release membar (or, optionally, its cpuorder child) to a
  // dependent volatile membar i.e. it ensures that one or other of
  // the following Mem flow subgraphs is present.
1887   //
1888   //   MemBarRelease
1889   //   MemBarCPUOrder {leading}
1890   //          |  \      . . .
1891   //          |  StoreN/P[mo_release]  . . .
1892   //          |   /
1893   //         MergeMem
1894   //          |
1895   //   MemBarVolatile {trailing or card mark}
1896   //
1897   //   MemBarRelease
1898   //   MemBarCPUOrder {leading}
1899   //      |       \      . . .
1900   //      |     CompareAndSwapX  . . .
1901   //               |
1902   //     . . .    SCMemProj
1903   //           \   |
1904   //      |    MergeMem
1905   //      |       /
1906   //    MemBarCPUOrder
1907   //    MemBarAcquire {trailing}
1908   //
1909   // if the correct configuration is present returns the trailing
1910   // membar otherwise NULL.
1911   //
1912   // the input membar is expected to be either a cpuorder membar or a
  // release membar. in the latter case it should not have a cpuorder
  // membar child.
1915   //
1916   // the returned value may be a card mark or trailing membar
1917   //
1918 
1919   MemBarNode *leading_to_normal(MemBarNode *leading)
1920   {
1921     assert((leading->Opcode() == Op_MemBarRelease ||
1922             leading->Opcode() == Op_MemBarCPUOrder),
1923            "expecting a volatile or cpuroder membar!");
1924 
1925     // check the mem flow
1926     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1927 
1928     if (!mem) {
1929       return NULL;
1930     }
1931 
1932     Node *x = NULL;
1933     StoreNode * st = NULL;
1934     LoadStoreNode *cas = NULL;
1935     MergeMemNode *mm = NULL;
1936 
1937     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1938       x = mem->fast_out(i);
1939       if (x->is_MergeMem()) {
        // two merge mems is one too many
        if (mm != NULL) {
          return NULL;
        }
        mm = x->as_MergeMem();
1945       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1946         // two releasing stores/CAS nodes is one too many
1947         if (st != NULL || cas != NULL) {
1948           return NULL;
1949         }
1950         st = x->as_Store();
1951       } else if (is_CAS(x->Opcode())) {
1952         if (st != NULL || cas != NULL) {
1953           return NULL;
1954         }
1955         cas = x->as_LoadStore();
1956       }
1957     }
1958 
1959     // must have a store or a cas
1960     if (!st && !cas) {
1961       return NULL;
1962     }
1963 
1964     // must have a merge if we also have st
1965     if (st && !mm) {
1966       return NULL;
1967     }
1968 
1969     Node *y = NULL;
1970     if (cas) {
1971       // look for an SCMemProj
1972       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
1973         x = cas->fast_out(i);
1974         if (x->is_Proj()) {
1975           y = x;
1976           break;
1977         }
1978       }
1979       if (y == NULL) {
1980         return NULL;
1981       }
1982       // the proj must feed a MergeMem
1983       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
1984         x = y->fast_out(i);
1985         if (x->is_MergeMem()) {
1986           mm = x->as_MergeMem();
1987           break;
1988         }
1989       }
1990       if (mm == NULL)
1991         return NULL;
1992     } else {
1993       // ensure the store feeds the existing mergemem;
1994       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
1995         if (st->fast_out(i) == mm) {
1996           y = st;
1997           break;
1998         }
1999       }
2000       if (y == NULL) {
2001         return NULL;
2002       }
2003     }
2004 
2005     MemBarNode *mbar = NULL;
    // ensure the merge feeds the expected type of membar
2007     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2008       x = mm->fast_out(i);
2009       if (x->is_MemBar()) {
2010         int opcode = x->Opcode();
2011         if (opcode == Op_MemBarVolatile && st) {
2012           mbar = x->as_MemBar();
2013         } else if (cas && opcode == Op_MemBarCPUOrder) {
          MemBarNode *y = x->as_MemBar();
2015           y = child_membar(y);
2016           if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
2017             mbar = y;
2018           }
2019         }
2020         break;
2021       }
2022     }
2023 
2024     return mbar;
2025   }
2026 
2027   // normal_to_leading
2028   //
2029   // graph traversal helper which detects the normal case Mem feed
2030   // from either a card mark or a trailing membar to a preceding
2031   // release membar (optionally its cpuorder child) i.e. it ensures
2032   // that one or other of the following Mem flow subgraphs is present.
2033   //
2034   //   MemBarRelease
2035   //   MemBarCPUOrder {leading}
2036   //          |  \      . . .
2037   //          |  StoreN/P[mo_release]  . . .
2038   //          |   /
2039   //         MergeMem
2040   //          |
2041   //   MemBarVolatile {card mark or trailing}
2042   //
2043   //   MemBarRelease
2044   //   MemBarCPUOrder {leading}
2045   //      |       \      . . .
2046   //      |     CompareAndSwapX  . . .
2047   //               |
2048   //     . . .    SCMemProj
2049   //           \   |
2050   //      |    MergeMem
2051   //      |        /
2052   //    MemBarCPUOrder
2053   //    MemBarAcquire {trailing}
2054   //
2055   // this predicate checks for the same flow as the previous predicate
2056   // but starting from the bottom rather than the top.
2057   //
  // if the configuration is present returns the cpuorder membar for
  // preference or, when absent, the release membar; otherwise NULL.
2060   //
  // n.b. the input membar is expected to be a MemBarVolatile or
  // MemBarAcquire; in the former case it need not be a card mark
  // membar.
2063 
2064   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2065   {
2066     // input must be a volatile membar
2067     assert((barrier->Opcode() == Op_MemBarVolatile ||
2068             barrier->Opcode() == Op_MemBarAcquire),
2069            "expecting a volatile or an acquire membar");
2070     Node *x;
2071     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2072 
2073     // if we have an acquire membar then it must be fed via a CPUOrder
2074     // membar
2075 
2076     if (is_cas) {
2077       // skip to parent barrier which must be a cpuorder
2078       x = parent_membar(barrier);
2079       if (x->Opcode() != Op_MemBarCPUOrder)
2080         return NULL;
2081     } else {
2082       // start from the supplied barrier
2083       x = (Node *)barrier;
2084     }
2085 
2086     // the Mem feed to the membar should be a merge
    x = x->in(TypeFunc::Memory);
2088     if (!x->is_MergeMem())
2089       return NULL;
2090 
2091     MergeMemNode *mm = x->as_MergeMem();
2092 
2093     if (is_cas) {
2094       // the merge should be fed from the CAS via an SCMemProj node
2095       x = NULL;
2096       for (uint idx = 1; idx < mm->req(); idx++) {
2097         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2098           x = mm->in(idx);
2099           break;
2100         }
2101       }
2102       if (x == NULL) {
2103         return NULL;
2104       }
2105       // check for a CAS feeding this proj
2106       x = x->in(0);
2107       int opcode = x->Opcode();
2108       if (!is_CAS(opcode)) {
2109         return NULL;
2110       }
2111       // the CAS should get its mem feed from the leading membar
2112       x = x->in(MemNode::Memory);
2113     } else {
2114       // the merge should get its Bottom mem feed from the leading membar
2115       x = mm->in(Compile::AliasIdxBot);
2116     }
2117 
2118     // ensure this is a non control projection
2119     if (!x->is_Proj() || x->is_CFG()) {
2120       return NULL;
2121     }
2122     // if it is fed by a membar that's the one we want
2123     x = x->in(0);
2124 
2125     if (!x->is_MemBar()) {
2126       return NULL;
2127     }
2128 
2129     MemBarNode *leading = x->as_MemBar();
2130     // reject invalid candidates
2131     if (!leading_membar(leading)) {
2132       return NULL;
2133     }
2134 
2135     // ok, we have a leading membar, now for the sanity clauses
2136 
2137     // the leading membar must feed Mem to a releasing store or CAS
2138     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2139     StoreNode *st = NULL;
2140     LoadStoreNode *cas = NULL;
2141     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2142       x = mem->fast_out(i);
2143       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2144         // two stores or CASes is one too many
2145         if (st != NULL || cas != NULL) {
2146           return NULL;
2147         }
2148         st = x->as_Store();
2149       } else if (is_CAS(x->Opcode())) {
2150         if (st != NULL || cas != NULL) {
2151           return NULL;
2152         }
2153         cas = x->as_LoadStore();
2154       }
2155     }
2156 
    // we must have either a store or a cas
    if (st == NULL && cas == NULL) {
2159       return NULL;
2160     }
2161 
2162     if (st == NULL) {
2163       // nothing more to check
2164       return leading;
2165     } else {
2166       // we should not have a store if we started from an acquire
2167       if (is_cas) {
2168         return NULL;
2169       }
2170 
2171       // the store should feed the merge we used to get here
2172       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2173         if (st->fast_out(i) == mm) {
2174           return leading;
2175         }
2176       }
2177     }
2178 
2179     return NULL;
2180   }
2181 
2182   // card_mark_to_trailing
2183   //
2184   // graph traversal helper which detects extra, non-normal Mem feed
2185   // from a card mark volatile membar to a trailing membar i.e. it
2186   // ensures that one of the following three GC post-write Mem flow
2187   // subgraphs is present.
2188   //
2189   // 1)
2190   //     . . .
2191   //       |
2192   //   MemBarVolatile (card mark)
2193   //      |          |
2194   //      |        StoreCM
2195   //      |          |
2196   //      |        . . .
2197   //  Bot |  /
2198   //   MergeMem
2199   //      |
2200   //      |
2201   //    MemBarVolatile {trailing}
2202   //
2203   // 2)
2204   //   MemBarRelease/CPUOrder (leading)
2205   //    |
2206   //    |
2207   //    |\       . . .
2208   //    | \        |
2209   //    |  \  MemBarVolatile (card mark)
2210   //    |   \   |     |
2211   //     \   \  |   StoreCM    . . .
2212   //      \   \ |
2213   //       \  Phi
2214   //        \ /
2215   //        Phi  . . .
2216   //     Bot |   /
2217   //       MergeMem
2218   //         |
2219   //    MemBarVolatile {trailing}
2220   //
2221   //
2222   // 3)
2223   //   MemBarRelease/CPUOrder (leading)
2224   //    |
2225   //    |\
2226   //    | \
2227   //    |  \      . . .
2228   //    |   \       |
2229   //    |\   \  MemBarVolatile (card mark)
2230   //    | \   \   |     |
2231   //    |  \   \  |   StoreCM    . . .
2232   //    |   \   \ |
2233   //     \   \  Phi
2234   //      \   \ /
2235   //       \  Phi
2236   //        \ /
2237   //        Phi  . . .
2238   //     Bot |   /
2239   //       MergeMem
2240   //         |
2241   //         |
2242   //    MemBarVolatile {trailing}
2243   //
2244   // configuration 1 is only valid if UseConcMarkSweepGC &&
2245   // UseCondCardMark
2246   //
2247   // configurations 2 and 3 are only valid if UseG1GC.
2248   //
2249   // if a valid configuration is present returns the trailing membar
2250   // otherwise NULL.
2251   //
2252   // n.b. the supplied membar is expected to be a card mark
2253   // MemBarVolatile i.e. the caller must ensure the input node has the
2254   // correct operand and feeds Mem to a StoreCM node
2255 
2256   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2257   {
2258     // input must be a card mark volatile membar
2259     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2260 
2261     Node *feed = barrier->proj_out(TypeFunc::Memory);
2262     Node *x;
2263     MergeMemNode *mm = NULL;
2264 
2265     const int MAX_PHIS = 3;     // max phis we will search through
2266     int phicount = 0;           // current search count
2267 
2268     bool retry_feed = true;
2269     while (retry_feed) {
2270       // see if we have a direct MergeMem feed
2271       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2272         x = feed->fast_out(i);
2273         // the correct Phi will be merging a Bot memory slice
2274         if (x->is_MergeMem()) {
2275           mm = x->as_MergeMem();
2276           break;
2277         }
2278       }
2279       if (mm) {
2280         retry_feed = false;
      } else if (UseG1GC && phicount++ < MAX_PHIS) {
2282         // the barrier may feed indirectly via one or two Phi nodes
2283         PhiNode *phi = NULL;
2284         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2285           x = feed->fast_out(i);
2286           // the correct Phi will be merging a Bot memory slice
2287           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2288             phi = x->as_Phi();
2289             break;
2290           }
2291         }
2292         if (!phi) {
2293           return NULL;
2294         }
2295         // look for another merge below this phi
2296         feed = phi;
2297       } else {
2298         // couldn't find a merge
2299         return NULL;
2300       }
2301     }
2302 
2303     // sanity check this feed turns up as the expected slice
    assert(mm->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2305 
2306     MemBarNode *trailing = NULL;
    // be sure we have a trailing membar fed by the merge
2308     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2309       x = mm->fast_out(i);
2310       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2311         trailing = x->as_MemBar();
2312         break;
2313       }
2314     }
2315 
2316     return trailing;
2317   }
2318 
2319   // trailing_to_card_mark
2320   //
2321   // graph traversal helper which detects extra, non-normal Mem feed
2322   // from a trailing volatile membar to a preceding card mark volatile
2323   // membar i.e. it identifies whether one of the three possible extra
2324   // GC post-write Mem flow subgraphs is present
2325   //
2326   // this predicate checks for the same flow as the previous predicate
2327   // but starting from the bottom rather than the top.
2328   //
2329   // if the configuration is present returns the card mark membar
2330   // otherwise NULL
2331   //
2332   // n.b. the supplied membar is expected to be a trailing
2333   // MemBarVolatile i.e. the caller must ensure the input node has the
2334   // correct opcode
2335 
2336   MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
2337   {
2338     assert(trailing->Opcode() == Op_MemBarVolatile,
2339            "expecting a volatile membar");
2340     assert(!is_card_mark_membar(trailing),
2341            "not expecting a card mark membar");
2342 
2343     // the Mem feed to the membar should be a merge
2344     Node *x = trailing->in(TypeFunc::Memory);
2345     if (!x->is_MergeMem()) {
2346       return NULL;
2347     }
2348 
2349     MergeMemNode *mm = x->as_MergeMem();
2350 
2351     x = mm->in(Compile::AliasIdxBot);
    // with G1 we may see a Phi or two before we see a Memory
2353     // Proj from the card mark membar
2354 
2355     const int MAX_PHIS = 3;     // max phis we will search through
2356     int phicount = 0;           // current search count
2357 
2358     bool retry_feed = !x->is_Proj();
2359 
2360     while (retry_feed) {
2361       if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
2362         PhiNode *phi = x->as_Phi();
2363         ProjNode *proj = NULL;
2364         PhiNode *nextphi = NULL;
2365         bool found_leading = false;
2366         for (uint i = 1; i < phi->req(); i++) {
2367           x = phi->in(i);
2368           if (x->is_Phi()) {
2369             nextphi = x->as_Phi();
2370           } else if (x->is_Proj()) {
2371             int opcode = x->in(0)->Opcode();
2372             if (opcode == Op_MemBarVolatile) {
2373               proj = x->as_Proj();
2374             } else if (opcode == Op_MemBarRelease ||
2375                        opcode == Op_MemBarCPUOrder) {
2376               // probably a leading membar
2377               found_leading = true;
2378             }
2379           }
2380         }
2381         // if we found a correct looking proj then retry from there
        // otherwise we must see a leading membar and a phi or this is
        // the wrong config
2384         if (proj != NULL) {
2385           x = proj;
2386           retry_feed = false;
2387         } else if (found_leading && nextphi != NULL) {
2388           // retry from this phi to check phi2
2389           x = nextphi;
2390         } else {
2391           // not what we were looking for
2392           return NULL;
2393         }
2394       } else {
2395         return NULL;
2396       }
2397     }
2398     // the proj has to come from the card mark membar
2399     x = x->in(0);
2400     if (!x->is_MemBar()) {
2401       return NULL;
2402     }
2403 
2404     MemBarNode *card_mark_membar = x->as_MemBar();
2405 
2406     if (!is_card_mark_membar(card_mark_membar)) {
2407       return NULL;
2408     }
2409 
2410     return card_mark_membar;
2411   }
2412 
2413   // trailing_to_leading
2414   //
2415   // graph traversal helper which checks the Mem flow up the graph
2416   // from a (non-card mark) trailing membar attempting to locate and
2417   // return an associated leading membar. it first looks for a
2418   // subgraph in the normal configuration (relying on helper
2419   // normal_to_leading). failing that it then looks for one of the
2420   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2422   // trailing_to_card_mark), and then checks that the card mark membar
2423   // is fed by a leading membar (once again relying on auxiliary
2424   // predicate normal_to_leading).
2425   //
  // if the configuration is valid returns the cpuorder membar for
  // preference or, when absent, the release membar; otherwise NULL.
2428   //
2429   // n.b. the input membar is expected to be either a volatile or
2430   // acquire membar but in the former case must *not* be a card mark
2431   // membar.
2432 
2433   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2434   {
2435     assert((trailing->Opcode() == Op_MemBarAcquire ||
2436             trailing->Opcode() == Op_MemBarVolatile),
2437            "expecting an acquire or volatile membar");
2438     assert((trailing->Opcode() != Op_MemBarVolatile ||
2439             !is_card_mark_membar(trailing)),
2440            "not expecting a card mark membar");
2441 
2442     MemBarNode *leading = normal_to_leading(trailing);
2443 
2444     if (leading) {
2445       return leading;
2446     }
2447 
2448     // nothing more to do if this is an acquire
2449     if (trailing->Opcode() == Op_MemBarAcquire) {
2450       return NULL;
2451     }
2452 
2453     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2454 
2455     if (!card_mark_membar) {
2456       return NULL;
2457     }
2458 
2459     return normal_to_leading(card_mark_membar);
2460   }
2461 
2462   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2463 
2464 bool unnecessary_acquire(const Node *barrier)
2465 {
2466   assert(barrier->is_MemBar(), "expecting a membar");
2467 
2468   if (UseBarriersForVolatile) {
2469     // we need to plant a dmb
2470     return false;
2471   }
2472 
2473   // a volatile read derived from bytecode (or also from an inlined
2474   // SHA field read via LibraryCallKit::load_field_from_object)
2475   // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
2477   // cases we will find the load node at the PARMS offset of the
2478   // acquire membar.  n.b. there may be an intervening DecodeN node.
2479   //
2480   // a volatile load derived from an inlined unsafe field access
2481   // manifests as a cpuorder membar with Ctl and Mem projections
2482   // feeding both an acquire membar and a LoadX[mo_acquire]. The
2483   // acquire then feeds another cpuorder membar via Ctl and Mem
2484   // projections. The load has no output dependency on these trailing
2485   // membars because subsequent nodes inserted into the graph take
2486   // their control feed from the final membar cpuorder meaning they
2487   // are all ordered after the load.
2488 
2489   Node *x = barrier->lookup(TypeFunc::Parms);
2490   if (x) {
2491     // we are starting from an acquire and it has a fake dependency
2492     //
2493     // need to check for
2494     //
2495     //   LoadX[mo_acquire]
2496     //   {  |1   }
2497     //   {DecodeN}
2498     //      |Parms
2499     //   MemBarAcquire*
2500     //
2501     // where * tags node we were passed
2502     // and |k means input k
2503     if (x->is_DecodeNarrowPtr()) {
2504       x = x->in(1);
2505     }
2506 
2507     return (x->is_Load() && x->as_Load()->is_acquire());
2508   }
2509 
2510   // now check for an unsafe volatile get
2511 
2512   // need to check for
2513   //
2514   //   MemBarCPUOrder
2515   //        ||       \\
2516   //   MemBarAcquire* LoadX[mo_acquire]
2517   //        ||
2518   //   MemBarCPUOrder
2519   //
2520   // where * tags node we were passed
2521   // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes
2522 
2523   // check for a parent MemBarCPUOrder
2524   ProjNode *ctl;
2525   ProjNode *mem;
2526   MemBarNode *parent = parent_membar(barrier);
2527   if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
2528     return false;
2529   ctl = parent->proj_out(TypeFunc::Control);
2530   mem = parent->proj_out(TypeFunc::Memory);
2531   if (!ctl || !mem) {
2532     return false;
2533   }
2534   // ensure the proj nodes both feed a LoadX[mo_acquire]
2535   LoadNode *ld = NULL;
2536   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
2537     x = ctl->fast_out(i);
2538     // if we see a load we keep hold of it and stop searching
2539     if (x->is_Load()) {
2540       ld = x->as_Load();
2541       break;
2542     }
2543   }
2544   // it must be an acquiring load
2545   if (ld && ld->is_acquire()) {
2546 
2547     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2548       x = mem->fast_out(i);
2549       // if we see the same load we drop it and stop searching
2550       if (x == ld) {
2551         ld = NULL;
2552         break;
2553       }
2554     }
2555     // we must have dropped the load
2556     if (ld == NULL) {
2557       // check for a child cpuorder membar
2558       MemBarNode *child  = child_membar(barrier->as_MemBar());
2559       if (child && child->Opcode() == Op_MemBarCPUOrder)
2560         return true;
2561     }
2562   }
2563 
  // final option for an unnecessary membar is that it is a trailing node
2565   // belonging to a CAS
2566 
2567   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2568 
2569   return leading != NULL;
2570 }
2571 
2572 bool needs_acquiring_load(const Node *n)
2573 {
2574   assert(n->is_Load(), "expecting a load");
2575   if (UseBarriersForVolatile) {
2576     // we use a normal load and a dmb
2577     return false;
2578   }
2579 
2580   LoadNode *ld = n->as_Load();
2581 
2582   if (!ld->is_acquire()) {
2583     return false;
2584   }
2585 
2586   // check if this load is feeding an acquire membar
2587   //
2588   //   LoadX[mo_acquire]
2589   //   {  |1   }
2590   //   {DecodeN}
2591   //      |Parms
2592   //   MemBarAcquire*
2593   //
2594   // where * tags node we were passed
2595   // and |k means input k
2596 
2597   Node *start = ld;
2598   Node *mbacq = NULL;
2599 
2600   // if we hit a DecodeNarrowPtr we reset the start node and restart
2601   // the search through the outputs
2602  restart:
2603 
2604   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2605     Node *x = start->fast_out(i);
2606     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2607       mbacq = x;
2608     } else if (!mbacq &&
2609                (x->is_DecodeNarrowPtr() ||
2610                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2611       start = x;
2612       goto restart;
2613     }
2614   }
2615 
2616   if (mbacq) {
2617     return true;
2618   }
2619 
2620   // now check for an unsafe volatile get
2621 
  // check if the Ctl and Mem feeds come from a MemBarCPUOrder
2623   //
2624   //     MemBarCPUOrder
2625   //        ||       \\
2626   //   MemBarAcquire* LoadX[mo_acquire]
2627   //        ||
2628   //   MemBarCPUOrder
2629 
2630   MemBarNode *membar;
2631 
2632   membar = parent_membar(ld);
2633 
  if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
2635     return false;
2636   }
2637 
2638   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2639 
2640   membar = child_membar(membar);
2641 
  if (!membar || membar->Opcode() != Op_MemBarAcquire) {
2643     return false;
2644   }
2645 
2646   membar = child_membar(membar);
2647 
  if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
2649     return false;
2650   }
2651 
2652   return true;
2653 }
2654 
2655 bool unnecessary_release(const Node *n)
2656 {
2657   assert((n->is_MemBar() &&
2658           n->Opcode() == Op_MemBarRelease),
2659          "expecting a release membar");
2660 
2661   if (UseBarriersForVolatile) {
2662     // we need to plant a dmb
2663     return false;
2664   }
2665 
2666   // if there is a dependent CPUOrder barrier then use that as the
2667   // leading
2668 
2669   MemBarNode *barrier = n->as_MemBar();
2670   // check for an intervening cpuorder membar
2671   MemBarNode *b = child_membar(barrier);
2672   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2673     // ok, so start the check from the dependent cpuorder barrier
2674     barrier = b;
2675   }
2676 
2677   // must start with a normal feed
2678   MemBarNode *child_barrier = leading_to_normal(barrier);
2679 
2680   if (!child_barrier) {
2681     return false;
2682   }
2683 
2684   if (!is_card_mark_membar(child_barrier)) {
2685     // this is the trailing membar and we are done
2686     return true;
2687   }
2688 
2689   // must be sure this card mark feeds a trailing membar
2690   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2691   return (trailing != NULL);
2692 }
2693 
2694 bool unnecessary_volatile(const Node *n)
2695 {
2696   // assert n->is_MemBar();
2697   if (UseBarriersForVolatile) {
2698     // we need to plant a dmb
2699     return false;
2700   }
2701 
2702   MemBarNode *mbvol = n->as_MemBar();
2703 
2704   // first we check if this is part of a card mark. if so then we have
2705   // to generate a StoreLoad barrier
2706 
2707   if (is_card_mark_membar(mbvol)) {
2708       return false;
2709   }
2710 
2711   // ok, if it's not a card mark then we still need to check if it is
  // a trailing membar of a volatile put graph.
2713 
2714   return (trailing_to_leading(mbvol) != NULL);
2715 }
2716 
2717 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2718 
2719 bool needs_releasing_store(const Node *n)
2720 {
2721   // assert n->is_Store();
2722   if (UseBarriersForVolatile) {
2723     // we use a normal store and dmb combination
2724     return false;
2725   }
2726 
2727   StoreNode *st = n->as_Store();
2728 
2729   // the store must be marked as releasing
2730   if (!st->is_release()) {
2731     return false;
2732   }
2733 
2734   // the store must be fed by a membar
2735 
2736   Node *x = st->lookup(StoreNode::Memory);
2737 
  if (!x || !x->is_Proj()) {
2739     return false;
2740   }
2741 
2742   ProjNode *proj = x->as_Proj();
2743 
2744   x = proj->lookup(0);
2745 
2746   if (!x || !x->is_MemBar()) {
2747     return false;
2748   }
2749 
2750   MemBarNode *barrier = x->as_MemBar();
2751 
  // if the barrier is a release membar or a cpuorder membar fed by a
2753   // release membar then we need to check whether that forms part of a
2754   // volatile put graph.
2755 
2756   // reject invalid candidates
2757   if (!leading_membar(barrier)) {
2758     return false;
2759   }
2760 
2761   // does this lead a normal subgraph?
2762   MemBarNode *mbvol = leading_to_normal(barrier);
2763 
2764   if (!mbvol) {
2765     return false;
2766   }
2767 
2768   // all done unless this is a card mark
2769   if (!is_card_mark_membar(mbvol)) {
2770     return true;
2771   }
2772 
2773   // we found a card mark -- just make sure we have a trailing barrier
2774 
2775   return (card_mark_to_trailing(mbvol) != NULL);
2776 }
2777 
2778 // predicate controlling translation of CAS
2779 //
2780 // returns true if CAS needs to use an acquiring load otherwise false
2781 
2782 bool needs_acquiring_load_exclusive(const Node *n)
2783 {
2784   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
2785   if (UseBarriersForVolatile) {
2786     return false;
2787   }
2788 
2789   // CAS nodes only ought to turn up in inlined unsafe CAS operations
2790 #ifdef ASSERT
2791   LoadStoreNode *st = n->as_LoadStore();
2792 
2793   // the store must be fed by a membar
2794 
2795   Node *x = st->lookup(StoreNode::Memory);
2796 
2797   assert (x && x->is_Proj(), "CAS not fed by memory proj!");
2798 
2799   ProjNode *proj = x->as_Proj();
2800 
2801   x = proj->lookup(0);
2802 
2803   assert (x && x->is_MemBar(), "CAS not fed by membar!");
2804 
2805   MemBarNode *barrier = x->as_MemBar();
2806 
  // the barrier must be a cpuorder membar fed by a release membar
2808 
2809   assert(barrier->Opcode() == Op_MemBarCPUOrder,
2810          "CAS not fed by cpuorder membar!");
2811 
2812   MemBarNode *b = parent_membar(barrier);
2813   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
2814           "CAS not fed by cpuorder+release membar pair!");
2815 
2816   // does this lead a normal subgraph?
2817   MemBarNode *mbar = leading_to_normal(barrier);
2818 
2819   assert(mbar != NULL, "CAS not embedded in normal graph!");
2820 
2821   assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
2822 #endif // ASSERT
2823   // so we can just return true here
2824   return true;
2825 }
2826 
2827 // predicate controlling translation of StoreCM
2828 //
// returns true if the dmb ishst (StoreStore) that normally precedes
// the card write can be elided, otherwise false
2831 
2832 bool unnecessary_storestore(const Node *storecm)
2833 {
2834   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2835 
2836   // we only ever need to generate a dmb ishst between an object put
2837   // and the associated card mark when we are using CMS without
2838   // conditional card marking
2839 
2840   if (!UseConcMarkSweepGC || UseCondCardMark) {
2841     return true;
2842   }
2843 
2844   // if we are implementing volatile puts using barriers then the
  // object put is implemented as an str so we must insert the dmb ishst
2846 
2847   if (UseBarriersForVolatile) {
2848     return false;
2849   }
2850 
2851   // we can omit the dmb ishst if this StoreCM is part of a volatile
  // put because in that case the put will be implemented by stlr
2853   //
2854   // we need to check for a normal subgraph feeding this StoreCM.
2855   // that means the StoreCM must be fed Memory from a leading membar,
2856   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2857   // leading membar must be part of a normal subgraph
2858 
2859   Node *x = storecm->in(StoreNode::Memory);
2860 
2861   if (!x->is_Proj()) {
2862     return false;
2863   }
2864 
2865   x = x->in(0);
2866 
2867   if (!x->is_MemBar()) {
2868     return false;
2869   }
2870 
2871   MemBarNode *leading = x->as_MemBar();
2872 
2873   // reject invalid candidates
2874   if (!leading_membar(leading)) {
2875     return false;
2876   }
2877 
2878   // we can omit the StoreStore if it is the head of a normal subgraph
2879   return (leading_to_normal(leading) != NULL);
2880 }
2881 
2882 
2883 #define __ _masm.
2884 
// forward declarations for helper functions to convert register
// indices to register objects
2887 
2888 // the ad file has to provide implementations of certain methods
2889 // expected by the generic code
2890 //
2891 // REQUIRED FUNCTIONALITY
2892 
2893 //=============================================================================
2894 
2895 // !!!!! Special hack to get all types of calls to specify the byte offset
2896 //       from the start of the call to the point where the return address
2897 //       will point.
2898 
2899 int MachCallStaticJavaNode::ret_addr_offset()
2900 {
2901   // call should be a simple bl
2902   int off = 4;
2903   return off;
2904 }
2905 
2906 int MachCallDynamicJavaNode::ret_addr_offset()
2907 {
2908   return 16; // movz, movk, movk, bl
2909 }
2910 
2911 int MachCallRuntimeNode::ret_addr_offset() {
2912   // for generated stubs the call will be
2913   //   far_call(addr)
2914   // for real runtime callouts it will be six instructions
2915   // see aarch64_enc_java_to_runtime
2916   //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr))
2918   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2919   //   blrt rscratch1
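  //   (n.b. the count of six presumably reflects the lea expanding
  //   to a 3 instruction movz/movk/movk sequence to materialise the
  //   64 bit runtime address)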
2920   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2921   if (cb) {
2922     return MacroAssembler::far_branch_size();
2923   } else {
2924     return 6 * NativeInstruction::instruction_size;
2925   }
2926 }
2927 
2928 // Indicate if the safepoint node needs the polling page as an input
2929 
2930 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2932 // instruction itself. so we cannot plant a mov of the safepoint poll
2933 // address followed by a load. setting this to true means the mov is
2934 // scheduled as a prior instruction. that's better for scheduling
2935 // anyway.
2936 
2937 bool SafePointNode::needs_polling_address_input()
2938 {
2939   return true;
2940 }
2941 
2942 //=============================================================================
2943 
2944 #ifndef PRODUCT
2945 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2946   st->print("BREAKPOINT");
2947 }
2948 #endif
2949 
2950 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2951   MacroAssembler _masm(&cbuf);
2952   __ brk(0);
2953 }
2954 
2955 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2956   return MachNode::size(ra_);
2957 }
2958 
2959 //=============================================================================
2960 
2961 #ifndef PRODUCT
2962   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2963     st->print("nop \t# %d bytes pad for loops and calls", _count);
2964   }
2965 #endif
2966 
2967   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2968     MacroAssembler _masm(&cbuf);
2969     for (int i = 0; i < _count; i++) {
2970       __ nop();
2971     }
2972   }
2973 
2974   uint MachNopNode::size(PhaseRegAlloc*) const {
2975     return _count * NativeInstruction::instruction_size;
2976   }
2977 
2978 //=============================================================================
2979 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2980 
2981 int Compile::ConstantTable::calculate_table_base_offset() const {
2982   return 0;  // absolute addressing, no offset
2983 }
2984 
2985 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2986 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2987   ShouldNotReachHere();
2988 }
2989 
2990 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2991   // Empty encoding
2992 }
2993 
2994 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2995   return 0;
2996 }
2997 
2998 #ifndef PRODUCT
2999 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
3000   st->print("-- \t// MachConstantBaseNode (empty encoding)");
3001 }
3002 #endif
3003 
3004 #ifndef PRODUCT
3005 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3006   Compile* C = ra_->C;
3007 
3008   int framesize = C->frame_slots() << LogBytesPerInt;
3009 
3010   if (C->need_stack_bang(framesize))
3011     st->print("# stack bang size=%d\n\t", framesize);
3012 
3013   if (framesize < ((1 << 9) + 2 * wordSize)) {
3014     st->print("sub  sp, sp, #%d\n\t", framesize);
3015     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
3016     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
3017   } else {
3018     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
3019     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
3020     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3021     st->print("sub  sp, sp, rscratch1");
3022   }
3023 }
3024 #endif
3025 
3026 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3027   Compile* C = ra_->C;
3028   MacroAssembler _masm(&cbuf);
3029 
3030   // n.b. frame size includes space for return pc and rfp
3031   const long framesize = C->frame_size_in_bytes();
3032   assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
3033 
3034   // insert a nop at the start of the prolog so we can patch in a
3035   // branch if we need to invalidate the method later
3036   __ nop();
3037 
3038   int bangsize = C->bang_size_in_bytes();
3039   if (C->need_stack_bang(bangsize) && UseStackBanging)
3040     __ generate_stack_overflow_check(bangsize);
3041 
3042   __ build_frame(framesize);
3043 
3044   if (NotifySimulator) {
3045     __ notify(Assembler::method_entry);
3046   }
3047 
3048   if (VerifyStackAtCalls) {
3049     Unimplemented();
3050   }
3051 
3052   C->set_frame_complete(cbuf.insts_size());
3053 
3054   if (C->has_mach_constant_base_node()) {
3055     // NOTE: We set the table base offset here because users might be
3056     // emitted before MachConstantBaseNode.
3057     Compile::ConstantTable& constant_table = C->constant_table();
3058     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
3059   }
3060 }
3061 
3062 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
3063 {
3064   return MachNode::size(ra_); // too many variables; just compute it
3065                               // the hard way
3066 }
3067 
3068 int MachPrologNode::reloc() const
3069 {
3070   return 0;
3071 }
3072 
3073 //=============================================================================
3074 
3075 #ifndef PRODUCT
3076 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3077   Compile* C = ra_->C;
3078   int framesize = C->frame_slots() << LogBytesPerInt;
3079 
3080   st->print("# pop frame %d\n\t",framesize);
3081 
3082   if (framesize == 0) {
3083     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
3084   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
3085     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
3086     st->print("add  sp, sp, #%d\n\t", framesize);
3087   } else {
3088     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3089     st->print("add  sp, sp, rscratch1\n\t");
3090     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
3091   }
3092 
3093   if (do_polling() && C->is_method_compilation()) {
3094     st->print("# touch polling page\n\t");
3095     st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
3096     st->print("ldr zr, [rscratch1]");
3097   }
3098 }
3099 #endif
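// Example (illustrative): a 64-byte frame pops via the middle branch above:
//   ldp  lr, rfp, [sp,#48]
//   add  sp, sp, #64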
3100 
3101 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3102   Compile* C = ra_->C;
3103   MacroAssembler _masm(&cbuf);
3104   int framesize = C->frame_slots() << LogBytesPerInt;
3105 
3106   __ remove_frame(framesize);
3107 
3108   if (NotifySimulator) {
3109     __ notify(Assembler::method_reentry);
3110   }
3111 
3112   if (do_polling() && C->is_method_compilation()) {
3113     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
3114   }
3115 }
3116 
3117 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
3118   // Variable size. Determine dynamically.
3119   return MachNode::size(ra_);
3120 }
3121 
3122 int MachEpilogNode::reloc() const {
3123   // Return number of relocatable values contained in this instruction.
3124   return 1; // 1 for polling page.
3125 }
3126 
3127 const Pipeline * MachEpilogNode::pipeline() const {
3128   return MachNode::pipeline_class();
3129 }
3130 
3131 // This method seems to be obsolete. It is declared in machnode.hpp
3132 // and defined in all *.ad files, but it is never called. Should we
3133 // get rid of it?
3134 int MachEpilogNode::safepoint_offset() const {
3135   assert(do_polling(), "no return for this epilog node");
3136   return 4;
3137 }
3138 
3139 //=============================================================================
3140 
3141 // Figure out which register class each belongs in: rc_int, rc_float or
3142 // rc_stack.
3143 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3144 
3145 static enum RC rc_class(OptoReg::Name reg) {
3146 
3147   if (reg == OptoReg::Bad) {
3148     return rc_bad;
3149   }
3150 
3151   // we have 30 int registers * 2 halves
3152   // (rscratch1 and rscratch2 are omitted)
3153 
3154   if (reg < 60) {
3155     return rc_int;
3156   }
3157 
  // we have 32 float registers * 4 halves
3159   if (reg < 60 + 128) {
3160     return rc_float;
3161   }
3162 
  // Between the float regs & stack are the flags regs.
3164   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3165 
3166   return rc_stack;
3167 }
3168 
3169 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3170   Compile* C = ra_->C;
3171 
3172   // Get registers to move.
3173   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3174   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3175   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3176   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3177 
3178   enum RC src_hi_rc = rc_class(src_hi);
3179   enum RC src_lo_rc = rc_class(src_lo);
3180   enum RC dst_hi_rc = rc_class(dst_hi);
3181   enum RC dst_lo_rc = rc_class(dst_lo);
3182 
3183   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3184 
3185   if (src_hi != OptoReg::Bad) {
3186     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3187            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3188            "expected aligned-adjacent pairs");
3189   }
3190 
3191   if (src_lo == dst_lo && src_hi == dst_hi) {
3192     return 0;            // Self copy, no move.
3193   }
3194 
3195   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3196               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3197   int src_offset = ra_->reg2offset(src_lo);
3198   int dst_offset = ra_->reg2offset(dst_lo);
3199 
3200   if (bottom_type()->isa_vect() != NULL) {
3201     uint ireg = ideal_reg();
3202     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3203     if (cbuf) {
3204       MacroAssembler _masm(cbuf);
3205       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3206       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3207         // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3209         if (ireg == Op_VecD) {
3210           __ unspill(rscratch1, true, src_offset);
3211           __ spill(rscratch1, true, dst_offset);
3212         } else {
3213           __ spill_copy128(src_offset, dst_offset);
3214         }
3215       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3216         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3217                ireg == Op_VecD ? __ T8B : __ T16B,
3218                as_FloatRegister(Matcher::_regEncode[src_lo]));
3219       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3220         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3221                        ireg == Op_VecD ? __ D : __ Q,
3222                        ra_->reg2offset(dst_lo));
3223       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3224         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3225                        ireg == Op_VecD ? __ D : __ Q,
3226                        ra_->reg2offset(src_lo));
3227       } else {
3228         ShouldNotReachHere();
3229       }
3230     }
3231   } else if (cbuf) {
3232     MacroAssembler _masm(cbuf);
3233     switch (src_lo_rc) {
3234     case rc_int:
3235       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3236         if (is64) {
3237             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3238                    as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
3244       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3245         if (is64) {
3246             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3247                      as_Register(Matcher::_regEncode[src_lo]));
3248         } else {
3249             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3250                      as_Register(Matcher::_regEncode[src_lo]));
3251         }
3252       } else {                    // gpr --> stack spill
3253         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3254         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3255       }
3256       break;
3257     case rc_float:
3258       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3259         if (is64) {
3260             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3261                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3262         } else {
3263             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3264                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3265         }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
3274       } else {                    // fpr --> stack spill
3275         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3276         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3277                  is64 ? __ D : __ S, dst_offset);
3278       }
3279       break;
3280     case rc_stack:
3281       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3282         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3283       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3284         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3285                    is64 ? __ D : __ S, src_offset);
3286       } else {                    // stack --> stack copy
3287         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3288         __ unspill(rscratch1, is64, src_offset);
3289         __ spill(rscratch1, is64, dst_offset);
3290       }
3291       break;
3292     default:
3293       assert(false, "bad rc_class for spill");
3294       ShouldNotReachHere();
3295     }
3296   }
3297 
3298   if (st) {
3299     st->print("spill ");
3300     if (src_lo_rc == rc_stack) {
3301       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3302     } else {
3303       st->print("%s -> ", Matcher::regName[src_lo]);
3304     }
3305     if (dst_lo_rc == rc_stack) {
3306       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3307     } else {
3308       st->print("%s", Matcher::regName[dst_lo]);
3309     }
3310     if (bottom_type()->isa_vect() != NULL) {
3311       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3312     } else {
3313       st->print("\t# spill size = %d", is64 ? 64:32);
3314     }
3315   }
3316 
  return 0;
}
3320 
3321 #ifndef PRODUCT
3322 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3323   if (!ra_)
3324     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3325   else
3326     implementation(NULL, ra_, false, st);
3327 }
3328 #endif
3329 
3330 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3331   implementation(&cbuf, ra_, false, NULL);
3332 }
3333 
3334 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3335   return MachNode::size(ra_);
3336 }
3337 
3338 //=============================================================================
3339 
3340 #ifndef PRODUCT
3341 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3342   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3343   int reg = ra_->get_reg_first(this);
3344   st->print("add %s, rsp, #%d]\t# box lock",
3345             Matcher::regName[reg], offset);
3346 }
3347 #endif
3348 
3349 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3350   MacroAssembler _masm(&cbuf);
3351 
3352   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3353   int reg    = ra_->get_encode(this);
3354 
3355   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3356     __ add(as_Register(reg), sp, offset);
3357   } else {
3358     ShouldNotReachHere();
3359   }
3360 }
3361 
3362 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3363   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3364   return 4;
3365 }
3366 
3367 //=============================================================================
3368 
3369 #ifndef PRODUCT
3370 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3371 {
3372   st->print_cr("# MachUEPNode");
3373   if (UseCompressedClassPointers) {
3374     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3375     if (Universe::narrow_klass_shift() != 0) {
3376       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3377     }
3378   } else {
3379    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3380   }
3381   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3382   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3383 }
3384 #endif
3385 
3386 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3387 {
3388   // This is the unverified entry point.
3389   MacroAssembler _masm(&cbuf);
3390 
3391   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3392   Label skip;
3393   // TODO
3394   // can we avoid this skip and still use a reloc?
3395   __ br(Assembler::EQ, skip);
3396   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3397   __ bind(skip);
3398 }
3399 
3400 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3401 {
3402   return MachNode::size(ra_);
3403 }
3404 
3405 // REQUIRED EMIT CODE
3406 
3407 //=============================================================================
3408 
3409 // Emit exception handler code.
3410 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3411 {
3412   // mov rscratch1 #exception_blob_entry_point
3413   // br rscratch1
3414   // Note that the code buffer's insts_mark is always relative to insts.
3415   // That's why we must use the macroassembler to generate a handler.
3416   MacroAssembler _masm(&cbuf);
3417   address base = __ start_a_stub(size_exception_handler());
3418   if (base == NULL) {
3419     ciEnv::current()->record_failure("CodeCache is full");
3420     return 0;  // CodeBuffer::expand failed
3421   }
3422   int offset = __ offset();
3423   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3424   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3425   __ end_a_stub();
3426   return offset;
3427 }
3428 
3429 // Emit deopt handler code.
3430 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
3431 {
3432   // Note that the code buffer's insts_mark is always relative to insts.
3433   // That's why we must use the macroassembler to generate a handler.
3434   MacroAssembler _masm(&cbuf);
3435   address base = __ start_a_stub(size_deopt_handler());
3436   if (base == NULL) {
3437     ciEnv::current()->record_failure("CodeCache is full");
3438     return 0;  // CodeBuffer::expand failed
3439   }
3440   int offset = __ offset();
3441 
3442   __ adr(lr, __ pc());
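  // adr above leaves the pc of this handler in lr, giving the unpack
  // blob a return address within this nmethod to work from.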
3443   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
3444 
3445   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
3446   __ end_a_stub();
3447   return offset;
3448 }
3449 
3450 // REQUIRED MATCHER CODE
3451 
3452 //=============================================================================
3453 
3454 const bool Matcher::match_rule_supported(int opcode) {
3455 
3456   // TODO
3457   // identify extra cases that we might want to provide match rules for
3458   // e.g. Op_StrEquals and other intrinsics
3459   if (!has_match_rule(opcode)) {
3460     return false;
3461   }
3462 
3463   return true;  // Per default match rules are supported.
3464 }
3465 
3466 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3467 
3468   // TODO
3469   // identify extra cases that we might want to provide match rules for
3470   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3471   if (!has_match_rule(opcode)) {
3472     return false;
3473   }
3474 
3475   bool ret_value = match_rule_supported(opcode);
3476   // Add rules here.
3477 
3478   return ret_value;  // Per default match rules are supported.
3479 }
3480 
3481 const int Matcher::float_pressure(int default_pressure_threshold) {
3482   return default_pressure_threshold;
3483 }
3484 
3485 int Matcher::regnum_to_fpu_offset(int regnum)
3486 {
3487   Unimplemented();
3488   return 0;
3489 }
3490 
3491 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
3492 {
3493   Unimplemented();
3494   return false;
3495 }
3496 
3497 const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
3499   // Probably always true, even if a temp register is required.
3500   return true;
3501 }
3502 
3503 // true just means we have fast l2f conversion
3504 const bool Matcher::convL2FSupported(void) {
3505   return true;
3506 }
3507 
3508 // Vector width in bytes.
3509 const int Matcher::vector_width_in_bytes(BasicType bt) {
3510   int size = MIN2(16,(int)MaxVectorSize);
3511   // Minimum 2 values in vector
3512   if (size < 2*type2aelembytes(bt)) size = 0;
3513   // But never < 4
3514   if (size < 4) size = 0;
3515   return size;
3516 }
3517 
3518 // Limits on vector size (number of elements) loaded into vector.
3519 const int Matcher::max_vector_size(const BasicType bt) {
3520   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3521 }
const int Matcher::min_vector_size(const BasicType bt) {
  // For the moment limit the vector size to 8 bytes
  int size = 8 / type2aelembytes(bt);
  if (size < 2) size = 2;
  return size;
}
3528 
3529 // Vector ideal reg.
3530 const int Matcher::vector_ideal_reg(int len) {
3531   switch(len) {
3532     case  8: return Op_VecD;
3533     case 16: return Op_VecX;
3534   }
3535   ShouldNotReachHere();
3536   return 0;
3537 }
3538 
3539 const int Matcher::vector_shift_count_ideal_reg(int size) {
3540   return Op_VecX;
3541 }
3542 
3543 // AES support not yet implemented
3544 const bool Matcher::pass_original_key_for_aes() {
3545   return false;
3546 }
3547 
// aarch64 supports misaligned vectors store/load.
3549 const bool Matcher::misaligned_vectors_ok() {
3550   return !AlignVector; // can be changed by flag
3551 }
3552 
3553 // false => size gets scaled to BytesPerLong, ok.
3554 const bool Matcher::init_array_count_is_in_bytes = false;
3555 
3556 // Threshold size for cleararray.
3557 const int Matcher::init_array_short_size = 18 * BytesPerLong;
3558 
3559 // Use conditional move (CMOVL)
3560 const int Matcher::long_cmove_cost() {
3561   // long cmoves are no more expensive than int cmoves
3562   return 0;
3563 }
3564 
3565 const int Matcher::float_cmove_cost() {
3566   // float cmoves are no more expensive than int cmoves
3567   return 0;
3568 }
3569 
3570 // Does the CPU require late expand (see block.cpp for description of late expand)?
3571 const bool Matcher::require_postalloc_expand = false;
3572 
3573 // Should the Matcher clone shifts on addressing modes, expecting them
3574 // to be subsumed into complex addressing expressions or compute them
3575 // into registers?  True for Intel but false for most RISCs
3576 const bool Matcher::clone_shift_expressions = false;
3577 
3578 // Do we need to mask the count passed to shift instructions or does
3579 // the cpu only look at the lower 5/6 bits anyway?
3580 const bool Matcher::need_masked_shift_count = false;
3581 
3582 // This affects two different things:
3583 //  - how Decode nodes are matched
3584 //  - how ImplicitNullCheck opportunities are recognized
3585 // If true, the matcher will try to remove all Decodes and match them
3586 // (as operands) into nodes. NullChecks are not prepared to deal with
3587 // Decodes by final_graph_reshaping().
3588 // If false, final_graph_reshaping() forces the decode behind the Cmp
3589 // for a NullCheck. The matcher matches the Decode node into a register.
3590 // Implicit_null_check optimization moves the Decode along with the
3591 // memory operation back up before the NullCheck.
3592 bool Matcher::narrow_oop_use_complex_address() {
3593   return Universe::narrow_oop_shift() == 0;
3594 }
3595 
3596 bool Matcher::narrow_klass_use_complex_address() {
3597 // TODO
3598 // decide whether we need to set this to true
3599   return false;
3600 }
3601 
3602 // Is it better to copy float constants, or load them directly from
3603 // memory?  Intel can load a float constant from a direct address,
3604 // requiring no extra registers.  Most RISCs will have to materialize
3605 // an address into a register first, so they would do better to copy
3606 // the constant from stack.
3607 const bool Matcher::rematerialize_float_constants = false;
3608 
3609 // If CPU can load and store mis-aligned doubles directly then no
3610 // fixup is needed.  Else we split the double into 2 integer pieces
3611 // and move it piece-by-piece.  Only happens when passing doubles into
3612 // C code as the Java calling convention forces doubles to be aligned.
3613 const bool Matcher::misaligned_doubles_ok = true;
3614 
// Never called on aarch64; Unimplemented() guards against accidental use.
3616 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
3617   Unimplemented();
3618 }
3619 
3620 // Advertise here if the CPU requires explicit rounding operations to
3621 // implement the UseStrictFP mode.
3622 const bool Matcher::strict_fp_requires_explicit_rounding = false;
3623 
3624 // Are floats converted to double when stored to stack during
3625 // deoptimization?
3626 bool Matcher::float_in_double() { return true; }
3627 
3628 // Do ints take an entire long register or just half?
3629 // The relevant question is how the int is callee-saved:
3630 // the whole long is written but de-opt'ing will have to extract
3631 // the relevant 32 bits.
3632 const bool Matcher::int_in_long = true;
3633 
3634 // Return whether or not this register is ever used as an argument.
3635 // This function is used on startup to build the trampoline stubs in
3636 // generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
3638 // available to the callee.
3639 bool Matcher::can_be_java_arg(int reg)
3640 {
3641   return
3642     reg ==  R0_num || reg == R0_H_num ||
3643     reg ==  R1_num || reg == R1_H_num ||
3644     reg ==  R2_num || reg == R2_H_num ||
3645     reg ==  R3_num || reg == R3_H_num ||
3646     reg ==  R4_num || reg == R4_H_num ||
3647     reg ==  R5_num || reg == R5_H_num ||
3648     reg ==  R6_num || reg == R6_H_num ||
3649     reg ==  R7_num || reg == R7_H_num ||
3650     reg ==  V0_num || reg == V0_H_num ||
3651     reg ==  V1_num || reg == V1_H_num ||
3652     reg ==  V2_num || reg == V2_H_num ||
3653     reg ==  V3_num || reg == V3_H_num ||
3654     reg ==  V4_num || reg == V4_H_num ||
3655     reg ==  V5_num || reg == V5_H_num ||
3656     reg ==  V6_num || reg == V6_H_num ||
3657     reg ==  V7_num || reg == V7_H_num;
3658 }
3659 
3660 bool Matcher::is_spillable_arg(int reg)
3661 {
3662   return can_be_java_arg(reg);
3663 }
3664 
3665 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
3666   return false;
3667 }
3668 
3669 RegMask Matcher::divI_proj_mask() {
3670   ShouldNotReachHere();
3671   return RegMask();
3672 }
3673 
3674 // Register for MODI projection of divmodI.
3675 RegMask Matcher::modI_proj_mask() {
3676   ShouldNotReachHere();
3677   return RegMask();
3678 }
3679 
3680 // Register for DIVL projection of divmodL.
3681 RegMask Matcher::divL_proj_mask() {
3682   ShouldNotReachHere();
3683   return RegMask();
3684 }
3685 
3686 // Register for MODL projection of divmodL.
3687 RegMask Matcher::modL_proj_mask() {
3688   ShouldNotReachHere();
3689   return RegMask();
3690 }
3691 
3692 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
3693   return FP_REG_mask();
3694 }
3695 
3696 // helper for encoding java_to_runtime calls on sim
3697 //
3698 // this is needed to compute the extra arguments required when
3699 // planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral and floating
3701 // arguments and the return type
3702 
3703 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3704 {
3705   int gps = 0;
3706   int fps = 0;
3707   const TypeTuple *domain = tf->domain();
3708   int max = domain->cnt();
3709   for (int i = TypeFunc::Parms; i < max; i++) {
3710     const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      break;
    default:
      gps++;
    }
3718   }
3719   gpcnt = gps;
3720   fpcnt = fps;
3721   BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  }
3736 }
3737 
3738 #define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
3739   MacroAssembler _masm(&cbuf);                                          \
3740   {                                                                     \
3741     guarantee(INDEX == -1, "mode not permitted for volatile");          \
3742     guarantee(DISP == 0, "mode not permitted for volatile");            \
3743     guarantee(SCALE == 0, "mode not permitted for volatile");           \
3744     __ INSN(REG, as_Register(BASE));                                    \
3745   }
3746 
3747 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
3748 typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
3749 typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
3750                                   MacroAssembler::SIMD_RegVariant T, const Address &adr);
3751 
3752   // Used for all non-volatile memory accesses.  The use of
3753   // $mem->opcode() to discover whether this pattern uses sign-extended
3754   // offsets is something of a kludge.
3755   static void loadStore(MacroAssembler masm, mem_insn insn,
3756                          Register reg, int opcode,
3757                          Register base, int index, int size, int disp)
3758   {
3759     Address::extend scale;
3760 
3761     // Hooboy, this is fugly.  We need a way to communicate to the
3762     // encoder that the index needs to be sign extended, so we have to
3763     // enumerate all the cases.
3764     switch (opcode) {
3765     case INDINDEXSCALEDOFFSETI2L:
3766     case INDINDEXSCALEDI2L:
3767     case INDINDEXSCALEDOFFSETI2LN:
3768     case INDINDEXSCALEDI2LN:
3769     case INDINDEXOFFSETI2L:
3770     case INDINDEXOFFSETI2LN:
3771       scale = Address::sxtw(size);
3772       break;
3773     default:
3774       scale = Address::lsl(size);
3775     }
3776 
3777     if (index == -1) {
3778       (masm.*insn)(reg, Address(base, disp));
3779     } else {
3780       if (disp == 0) {
3781         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3782       } else {
3783         masm.lea(rscratch1, Address(base, disp));
3784         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3785       }
3786     }
3787   }
3788 
3789   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3790                          FloatRegister reg, int opcode,
3791                          Register base, int index, int size, int disp)
3792   {
3793     Address::extend scale;
3794 
3795     switch (opcode) {
3796     case INDINDEXSCALEDOFFSETI2L:
3797     case INDINDEXSCALEDI2L:
3798     case INDINDEXSCALEDOFFSETI2LN:
3799     case INDINDEXSCALEDI2LN:
3800       scale = Address::sxtw(size);
3801       break;
3802     default:
3803       scale = Address::lsl(size);
3804     }
3805 
    if (index == -1) {
3807       (masm.*insn)(reg, Address(base, disp));
3808     } else {
3809       if (disp == 0) {
3810         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3811       } else {
3812         masm.lea(rscratch1, Address(base, disp));
3813         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3814       }
3815     }
3816   }
3817 
3818   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3819                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3820                          int opcode, Register base, int index, int size, int disp)
3821   {
3822     if (index == -1) {
3823       (masm.*insn)(reg, T, Address(base, disp));
3824     } else {
3825       assert(disp == 0, "unsupported address mode");
3826       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3827     }
3828   }
3829 
3830 %}
3831 
3832 
3833 
3834 //----------ENCODING BLOCK-----------------------------------------------------
3835 // This block specifies the encoding classes used by the compiler to
3836 // output byte streams.  Encoding classes are parameterized macros
3837 // used by Machine Instruction Nodes in order to generate the bit
3838 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are currently
// supported: REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER.
// REG_INTER causes an operand to generate a function
3842 // which returns its register number when queried.  CONST_INTER causes
3843 // an operand to generate a function which returns the value of the
3844 // constant when queried.  MEMORY_INTER causes an operand to generate
3845 // four functions which return the Base Register, the Index Register,
3846 // the Scale Value, and the Offset Value of the operand when queried.
3847 // COND_INTER causes an operand to generate six functions which return
3848 // the encoding code (ie - encoding bits for the instruction)
3849 // associated with each basic boolean condition for a conditional
3850 // instruction.
3851 //
3852 // Instructions specify two basic values for encoding.  Again, a
3853 // function is available to check if the constant displacement is an
3854 // oop. They use the ins_encode keyword to specify their encoding
3855 // classes (which must be a sequence of enc_class names, and their
3856 // parameters, specified in the encoding block), and they use the
3857 // opcode keyword to specify, in order, their primary, secondary, and
3858 // tertiary opcode.  Only the opcode sections which a particular
3859 // instruction needs for encoding need to be specified.
3860 encode %{
  // Build emit functions for each basic byte or larger field in the
  // encoding scheme and call them
3863   // from C++ code in the enc_class source block.  Emit functions will
3864   // live in the main source block for now.  In future, we can
3865   // generalize this by adding a syntax that specifies the sizes of
3866   // fields in an order, so that the adlc can build the emit functions
3867   // automagically
3868 
3869   // catch all for unimplemented encodings
3870   enc_class enc_unimplemented %{
3871     MacroAssembler _masm(&cbuf);
3872     __ unimplemented("C2 catch all");
3873   %}
3874 
3875   // BEGIN Non-volatile memory access
3876 
3877   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
3878     Register dst_reg = as_Register($dst$$reg);
3879     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
3880                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3881   %}
3882 
3883   enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
3884     Register dst_reg = as_Register($dst$$reg);
3885     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
3886                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3887   %}
3888 
3889   enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
3890     Register dst_reg = as_Register($dst$$reg);
3891     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3892                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3893   %}
3894 
3895   enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
3896     Register dst_reg = as_Register($dst$$reg);
3897     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3898                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3899   %}
3900 
3901   enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
3902     Register dst_reg = as_Register($dst$$reg);
3903     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
3904                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3905   %}
3906 
3907   enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
3908     Register dst_reg = as_Register($dst$$reg);
3909     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
3910                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3911   %}
3912 
3913   enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
3914     Register dst_reg = as_Register($dst$$reg);
3915     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
3916                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3917   %}
3918 
3919   enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
3920     Register dst_reg = as_Register($dst$$reg);
3921     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
3922                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3923   %}
3924 
3925   enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
3926     Register dst_reg = as_Register($dst$$reg);
3927     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
3928                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3929   %}
3930 
3931   enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
3932     Register dst_reg = as_Register($dst$$reg);
3933     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
3934                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3935   %}
3936 
3937   enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
3938     Register dst_reg = as_Register($dst$$reg);
3939     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
3940                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3941   %}
3942 
3943   enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
3944     Register dst_reg = as_Register($dst$$reg);
3945     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
3946                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3947   %}
3948 
3949   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
3950     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3951     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
3952                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3953   %}
3954 
3955   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
3956     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3957     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
3958                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3959   %}
3960 
3961   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
3962     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3963     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
3964        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3965   %}
3966 
3967   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
3968     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3969     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
3970        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3971   %}
3972 
3973   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
3974     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3975     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
3976        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3977   %}
3978 
3979   enc_class aarch64_enc_strb(iRegI src, memory mem) %{
3980     Register src_reg = as_Register($src$$reg);
3981     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
3982                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3983   %}
3984 
3985   enc_class aarch64_enc_strb0(memory mem) %{
3986     MacroAssembler _masm(&cbuf);
3987     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
3988                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3989   %}
3990 
3991   enc_class aarch64_enc_strb0_ordered(memory mem) %{
3992     MacroAssembler _masm(&cbuf);
3993     __ membar(Assembler::StoreStore);
3994     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
3995                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3996   %}
3997 
3998   enc_class aarch64_enc_strh(iRegI src, memory mem) %{
3999     Register src_reg = as_Register($src$$reg);
4000     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
4001                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4002   %}
4003 
4004   enc_class aarch64_enc_strh0(memory mem) %{
4005     MacroAssembler _masm(&cbuf);
4006     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
4007                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4008   %}
4009 
4010   enc_class aarch64_enc_strw(iRegI src, memory mem) %{
4011     Register src_reg = as_Register($src$$reg);
4012     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
4013                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4014   %}
4015 
4016   enc_class aarch64_enc_strw0(memory mem) %{
4017     MacroAssembler _masm(&cbuf);
4018     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
4019                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4020   %}
4021 
4022   enc_class aarch64_enc_str(iRegL src, memory mem) %{
4023     Register src_reg = as_Register($src$$reg);
4024     // we sometimes get asked to store the stack pointer into the
4025     // current thread -- we cannot do that directly on AArch64
4026     if (src_reg == r31_sp) {
4027       MacroAssembler _masm(&cbuf);
4028       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4029       __ mov(rscratch2, sp);
4030       src_reg = rscratch2;
4031     }
4032     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
4033                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4034   %}
4035 
4036   enc_class aarch64_enc_str0(memory mem) %{
4037     MacroAssembler _masm(&cbuf);
4038     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
4039                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4040   %}
4041 
4042   enc_class aarch64_enc_strs(vRegF src, memory mem) %{
4043     FloatRegister src_reg = as_FloatRegister($src$$reg);
4044     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
4045                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4046   %}
4047 
4048   enc_class aarch64_enc_strd(vRegD src, memory mem) %{
4049     FloatRegister src_reg = as_FloatRegister($src$$reg);
4050     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
4051                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4052   %}
4053 
4054   enc_class aarch64_enc_strvS(vecD src, memory mem) %{
4055     FloatRegister src_reg = as_FloatRegister($src$$reg);
4056     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
4057        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4058   %}
4059 
4060   enc_class aarch64_enc_strvD(vecD src, memory mem) %{
4061     FloatRegister src_reg = as_FloatRegister($src$$reg);
4062     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
4063        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4064   %}
4065 
4066   enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
4067     FloatRegister src_reg = as_FloatRegister($src$$reg);
4068     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
4069        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4070   %}
4071 
4072   // END Non-volatile memory access
4073 
4074   // volatile loads and stores
4075 
4076   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4077     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4078                  rscratch1, stlrb);
4079   %}
4080 
4081   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4082     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4083                  rscratch1, stlrh);
4084   %}
4085 
4086   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4087     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4088                  rscratch1, stlrw);
4089   %}
4090 
4091 
4092   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
4093     Register dst_reg = as_Register($dst$$reg);
4094     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4095              rscratch1, ldarb);
4096     __ sxtbw(dst_reg, dst_reg);
4097   %}
4098 
4099   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
4100     Register dst_reg = as_Register($dst$$reg);
4101     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4102              rscratch1, ldarb);
4103     __ sxtb(dst_reg, dst_reg);
4104   %}
4105 
4106   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
4107     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4108              rscratch1, ldarb);
4109   %}
4110 
4111   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
4112     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4113              rscratch1, ldarb);
4114   %}
4115 
4116   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
4117     Register dst_reg = as_Register($dst$$reg);
4118     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4119              rscratch1, ldarh);
4120     __ sxthw(dst_reg, dst_reg);
4121   %}
4122 
4123   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
4124     Register dst_reg = as_Register($dst$$reg);
4125     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4126              rscratch1, ldarh);
4127     __ sxth(dst_reg, dst_reg);
4128   %}
4129 
4130   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
4131     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4132              rscratch1, ldarh);
4133   %}
4134 
4135   enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
4136     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4137              rscratch1, ldarh);
4138   %}
4139 
4140   enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
4141     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4142              rscratch1, ldarw);
4143   %}
4144 
4145   enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
4146     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4147              rscratch1, ldarw);
4148   %}
4149 
4150   enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
4151     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4152              rscratch1, ldar);
4153   %}
4154 
4155   enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
4156     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4157              rscratch1, ldarw);
4158     __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
4159   %}
4160 
4161   enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
4162     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4163              rscratch1, ldar);
4164     __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
4165   %}
4166 
4167   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
4168     Register src_reg = as_Register($src$$reg);
4169     // we sometimes get asked to store the stack pointer into the
4170     // current thread -- we cannot do that directly on AArch64
4171     if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
4173       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4174       __ mov(rscratch2, sp);
4175       src_reg = rscratch2;
4176     }
4177     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4178                  rscratch1, stlr);
4179   %}
4180 
4181   enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
4182     {
4183       MacroAssembler _masm(&cbuf);
4184       FloatRegister src_reg = as_FloatRegister($src$$reg);
4185       __ fmovs(rscratch2, src_reg);
4186     }
4187     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4188                  rscratch1, stlrw);
4189   %}
4190 
4191   enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
4192     {
4193       MacroAssembler _masm(&cbuf);
4194       FloatRegister src_reg = as_FloatRegister($src$$reg);
4195       __ fmovd(rscratch2, src_reg);
4196     }
4197     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4198                  rscratch1, stlr);
4199   %}
4200 
4201   // synchronized read/update encodings
4202 
4203   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
4204     MacroAssembler _masm(&cbuf);
4205     Register dst_reg = as_Register($dst$$reg);
4206     Register base = as_Register($mem$$base);
4207     int index = $mem$$index;
4208     int scale = $mem$$scale;
4209     int disp = $mem$$disp;
4210     if (index == -1) {
      if (disp != 0) {
4212         __ lea(rscratch1, Address(base, disp));
4213         __ ldaxr(dst_reg, rscratch1);
4214       } else {
4215         // TODO
4216         // should we ever get anything other than this case?
4217         __ ldaxr(dst_reg, base);
4218       }
4219     } else {
4220       Register index_reg = as_Register(index);
4221       if (disp == 0) {
4222         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
4223         __ ldaxr(dst_reg, rscratch1);
4224       } else {
4225         __ lea(rscratch1, Address(base, disp));
4226         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
4227         __ ldaxr(dst_reg, rscratch1);
4228       }
4229     }
4230   %}
4231 
4232   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
4233     MacroAssembler _masm(&cbuf);
4234     Register src_reg = as_Register($src$$reg);
4235     Register base = as_Register($mem$$base);
4236     int index = $mem$$index;
4237     int scale = $mem$$scale;
4238     int disp = $mem$$disp;
4239     if (index == -1) {
      if (disp != 0) {
4241         __ lea(rscratch2, Address(base, disp));
4242         __ stlxr(rscratch1, src_reg, rscratch2);
4243       } else {
4244         // TODO
4245         // should we ever get anything other than this case?
4246         __ stlxr(rscratch1, src_reg, base);
4247       }
4248     } else {
4249       Register index_reg = as_Register(index);
4250       if (disp == 0) {
4251         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4252         __ stlxr(rscratch1, src_reg, rscratch2);
4253       } else {
4254         __ lea(rscratch2, Address(base, disp));
4255         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4256         __ stlxr(rscratch1, src_reg, rscratch2);
4257       }
4258     }
4259     __ cmpw(rscratch1, zr);
4260   %}
4261 
4262   enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
4263     MacroAssembler _masm(&cbuf);
4264     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4265     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4266                &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
4267   %}
4268 
4269   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
4270     MacroAssembler _masm(&cbuf);
4271     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4272     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4273                &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
4274   %}
4275 
4276 
4277   // The only difference between aarch64_enc_cmpxchg and
4278   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4279   // CompareAndSwap sequence to serve as a barrier on acquiring a
4280   // lock.
4281   enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
4282     MacroAssembler _masm(&cbuf);
4283     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4284     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4285                &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
4286   %}
4287 
4288   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
4289     MacroAssembler _masm(&cbuf);
4290     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4291     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4292                &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
4293   %}
4294 
4295 
4296   // auxiliary used for CompareAndSwapX to set result register
4297   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4298     MacroAssembler _masm(&cbuf);
4299     Register res_reg = as_Register($res$$reg);
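    // cset materializes the comparison result: res_reg = 1 if EQ else 0,
    // giving the CompareAndSwapX boolean result.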
4300     __ cset(res_reg, Assembler::EQ);
4301   %}
4302 
4303   // prefetch encodings
4304 
4305   enc_class aarch64_enc_prefetchw(memory mem) %{
4306     MacroAssembler _masm(&cbuf);
4307     Register base = as_Register($mem$$base);
4308     int index = $mem$$index;
4309     int scale = $mem$$scale;
4310     int disp = $mem$$disp;
4311     if (index == -1) {
4312       __ prfm(Address(base, disp), PSTL1KEEP);
4313       __ nop();
4314     } else {
4315       Register index_reg = as_Register(index);
4316       if (disp == 0) {
4317         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4318       } else {
4319         __ lea(rscratch1, Address(base, disp));
4320         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4321       }
4322     }
4323   %}
4324 
4325   enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
4326     MacroAssembler _masm(&cbuf);
4327     Register cnt_reg = as_Register($cnt$$reg);
4328     Register base_reg = as_Register($base$$reg);
4329     // base is word aligned
4330     // cnt is count of words
4331 
4332     Label loop;
4333     Label entry;
4334 
4335 //  Algorithm:
4336 //
4337 //    scratch1 = cnt & 7;
4338 //    cnt -= scratch1;
4339 //    p += scratch1;
4340 //    switch (scratch1) {
4341 //      do {
4342 //        cnt -= 8;
4343 //          p[-8] = 0;
4344 //        case 7:
4345 //          p[-7] = 0;
4346 //        case 6:
4347 //          p[-6] = 0;
4348 //          // ...
4349 //        case 1:
4350 //          p[-1] = 0;
4351 //        case 0:
4352 //          p += 8;
4353 //      } while (cnt);
4354 //    }
4355 
4356     const int unroll = 8; // Number of str(zr) instructions we'll unroll
4357 
4358     __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
4359     __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= unroll
4360     // base_reg always points to the end of the region we're about to zero
4361     __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
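    // Each unrolled str below is one 4-byte instruction, so branching to
    // (entry - (cnt % unroll) * 4) executes exactly the trailing stores
    // needed for the remainder -- a computed-goto form of Duff's device.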
4362     __ adr(rscratch2, entry);
4363     __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
4364     __ br(rscratch2);
4365     __ bind(loop);
4366     __ sub(cnt_reg, cnt_reg, unroll);
4367     for (int i = -unroll; i < 0; i++)
4368       __ str(zr, Address(base_reg, i * wordSize));
4369     __ bind(entry);
4370     __ add(base_reg, base_reg, unroll * wordSize);
4371     __ cbnz(cnt_reg, loop);
4372   %}
4373 
  // mov encodings
4375 
4376   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4377     MacroAssembler _masm(&cbuf);
4378     u_int32_t con = (u_int32_t)$src$$constant;
4379     Register dst_reg = as_Register($dst$$reg);
4380     if (con == 0) {
4381       __ movw(dst_reg, zr);
4382     } else {
4383       __ movw(dst_reg, con);
4384     }
4385   %}
4386 
4387   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4388     MacroAssembler _masm(&cbuf);
4389     Register dst_reg = as_Register($dst$$reg);
4390     u_int64_t con = (u_int64_t)$src$$constant;
4391     if (con == 0) {
4392       __ mov(dst_reg, zr);
4393     } else {
4394       __ mov(dst_reg, con);
4395     }
4396   %}
4397 
4398   enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
4399     MacroAssembler _masm(&cbuf);
4400     Register dst_reg = as_Register($dst$$reg);
4401     address con = (address)$src$$constant;
4402     if (con == NULL || con == (address)1) {
4403       ShouldNotReachHere();
4404     } else {
4405       relocInfo::relocType rtype = $src->constant_reloc();
4406       if (rtype == relocInfo::oop_type) {
4407         __ movoop(dst_reg, (jobject)con, /*immediate*/true);
4408       } else if (rtype == relocInfo::metadata_type) {
4409         __ mov_metadata(dst_reg, (Metadata*)con);
4410       } else {
4411         assert(rtype == relocInfo::none, "unexpected reloc type");
4412         if (con < (address)(uintptr_t)os::vm_page_size()) {
4413           __ mov(dst_reg, con);
4414         } else {
4415           unsigned long offset;
4416           __ adrp(dst_reg, con, offset);
4417           __ add(dst_reg, dst_reg, offset);
4418         }
4419       }
4420     }
4421   %}
4422 
4423   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4424     MacroAssembler _masm(&cbuf);
4425     Register dst_reg = as_Register($dst$$reg);
4426     __ mov(dst_reg, zr);
4427   %}
4428 
4429   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4430     MacroAssembler _masm(&cbuf);
4431     Register dst_reg = as_Register($dst$$reg);
4432     __ mov(dst_reg, (u_int64_t)1);
4433   %}
4434 
4435   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
4436     MacroAssembler _masm(&cbuf);
4437     address page = (address)$src$$constant;
4438     Register dst_reg = as_Register($dst$$reg);
4439     unsigned long off;
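    // The polling page is page aligned, so adrp alone reaches it and the
    // residual offset is zero (asserted below).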
4440     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
4441     assert(off == 0, "assumed offset == 0");
4442   %}
4443 
4444   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
4445     MacroAssembler _masm(&cbuf);
4446     address page = (address)$src$$constant;
4447     Register dst_reg = as_Register($dst$$reg);
4448     unsigned long off;
4449     __ adrp(dst_reg, ExternalAddress(page), off);
4450     assert(off == 0, "assumed offset == 0");
4451   %}
4452 
4453   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
4454     MacroAssembler _masm(&cbuf);
4455     Register dst_reg = as_Register($dst$$reg);
4456     address con = (address)$src$$constant;
4457     if (con == NULL) {
4458       ShouldNotReachHere();
4459     } else {
4460       relocInfo::relocType rtype = $src->constant_reloc();
4461       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
4462       __ set_narrow_oop(dst_reg, (jobject)con);
4463     }
4464   %}
4465 
4466   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4467     MacroAssembler _masm(&cbuf);
4468     Register dst_reg = as_Register($dst$$reg);
4469     __ mov(dst_reg, zr);
4470   %}
4471 
4472   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
4473     MacroAssembler _masm(&cbuf);
4474     Register dst_reg = as_Register($dst$$reg);
4475     address con = (address)$src$$constant;
4476     if (con == NULL) {
4477       ShouldNotReachHere();
4478     } else {
4479       relocInfo::relocType rtype = $src->constant_reloc();
4480       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
4481       __ set_narrow_klass(dst_reg, (Klass *)con);
4482     }
4483   %}
4484 
4485   // arithmetic encodings
4486 
4487   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4488     MacroAssembler _masm(&cbuf);
4489     Register dst_reg = as_Register($dst$$reg);
4490     Register src_reg = as_Register($src1$$reg);
4491     int32_t con = (int32_t)$src2$$constant;
4492     // add has primary == 0, subtract has primary == 1
4493     if ($primary) { con = -con; }
4494     if (con < 0) {
4495       __ subw(dst_reg, src_reg, -con);
4496     } else {
4497       __ addw(dst_reg, src_reg, con);
4498     }
4499   %}
4500 
4501   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4502     MacroAssembler _masm(&cbuf);
4503     Register dst_reg = as_Register($dst$$reg);
4504     Register src_reg = as_Register($src1$$reg);
4505     int32_t con = (int32_t)$src2$$constant;
4506     // add has primary == 0, subtract has primary == 1
4507     if ($primary) { con = -con; }
4508     if (con < 0) {
4509       __ sub(dst_reg, src_reg, -con);
4510     } else {
4511       __ add(dst_reg, src_reg, con);
4512     }
4513   %}
4514 
4515   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4516     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4520     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4521   %}
4522 
4523   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4524     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4528     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4529   %}
4530 
4531   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4532     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4536     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4537   %}
4538 
4539   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4540     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4544     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4545   %}
4546 
4547   // compare instruction encodings
4548 
4549   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4550     MacroAssembler _masm(&cbuf);
4551     Register reg1 = as_Register($src1$$reg);
4552     Register reg2 = as_Register($src2$$reg);
4553     __ cmpw(reg1, reg2);
4554   %}
4555 
4556   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4557     MacroAssembler _masm(&cbuf);
4558     Register reg = as_Register($src1$$reg);
4559     int32_t val = $src2$$constant;
4560     if (val >= 0) {
4561       __ subsw(zr, reg, val);
4562     } else {
4563       __ addsw(zr, reg, -val);
4564     }
4565   %}
4566 
4567   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4568     MacroAssembler _masm(&cbuf);
4569     Register reg1 = as_Register($src1$$reg);
4570     u_int32_t val = (u_int32_t)$src2$$constant;
4571     __ movw(rscratch1, val);
4572     __ cmpw(reg1, rscratch1);
4573   %}
4574 
4575   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4576     MacroAssembler _masm(&cbuf);
4577     Register reg1 = as_Register($src1$$reg);
4578     Register reg2 = as_Register($src2$$reg);
4579     __ cmp(reg1, reg2);
4580   %}
4581 
4582   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
4583     MacroAssembler _masm(&cbuf);
4584     Register reg = as_Register($src1$$reg);
4585     int64_t val = $src2$$constant;
4586     if (val >= 0) {
4587       __ subs(zr, reg, val);
4588     } else if (val != -val) {
4589       __ adds(zr, reg, -val);
4590     } else {
      // aargh, Long.MIN_VALUE is a special case
4592       __ orr(rscratch1, zr, (u_int64_t)val);
4593       __ subs(zr, reg, rscratch1);
4594     }
4595   %}
4596 
4597   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4598     MacroAssembler _masm(&cbuf);
4599     Register reg1 = as_Register($src1$$reg);
4600     u_int64_t val = (u_int64_t)$src2$$constant;
4601     __ mov(rscratch1, val);
4602     __ cmp(reg1, rscratch1);
4603   %}
4604 
4605   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4606     MacroAssembler _masm(&cbuf);
4607     Register reg1 = as_Register($src1$$reg);
4608     Register reg2 = as_Register($src2$$reg);
4609     __ cmp(reg1, reg2);
4610   %}
4611 
4612   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4613     MacroAssembler _masm(&cbuf);
4614     Register reg1 = as_Register($src1$$reg);
4615     Register reg2 = as_Register($src2$$reg);
4616     __ cmpw(reg1, reg2);
4617   %}
4618 
4619   enc_class aarch64_enc_testp(iRegP src) %{
4620     MacroAssembler _masm(&cbuf);
4621     Register reg = as_Register($src$$reg);
4622     __ cmp(reg, zr);
4623   %}
4624 
4625   enc_class aarch64_enc_testn(iRegN src) %{
4626     MacroAssembler _masm(&cbuf);
4627     Register reg = as_Register($src$$reg);
4628     __ cmpw(reg, zr);
4629   %}
4630 
4631   enc_class aarch64_enc_b(label lbl) %{
4632     MacroAssembler _masm(&cbuf);
4633     Label *L = $lbl$$label;
4634     __ b(*L);
4635   %}
4636 
4637   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4638     MacroAssembler _masm(&cbuf);
4639     Label *L = $lbl$$label;
4640     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4641   %}
4642 
4643   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4644     MacroAssembler _masm(&cbuf);
4645     Label *L = $lbl$$label;
4646     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4647   %}
4648 
4649   enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
4650   %{
4651      Register sub_reg = as_Register($sub$$reg);
4652      Register super_reg = as_Register($super$$reg);
4653      Register temp_reg = as_Register($temp$$reg);
4654      Register result_reg = as_Register($result$$reg);
4655 
4656      Label miss;
4657      MacroAssembler _masm(&cbuf);
4658      __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
4659                                      NULL, &miss,
4660                                      /*set_cond_codes:*/ true);
4661      if ($primary) {
4662        __ mov(result_reg, zr);
4663      }
4664      __ bind(miss);
4665   %}
4666 
4667   enc_class aarch64_enc_java_static_call(method meth) %{
4668     MacroAssembler _masm(&cbuf);
4669 
4670     address addr = (address)$meth$$method;
4671     address call;
4672     if (!_method) {
4673       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
4674       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
4675     } else if (_optimized_virtual) {
4676       call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
4677     } else {
4678       call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
4679     }
4680     if (call == NULL) {
4681       ciEnv::current()->record_failure("CodeCache is full");
4682       return;
4683     }
4684 
4685     if (_method) {
4686       // Emit stub for static call
4687       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
4688       if (stub == NULL) {
4689         ciEnv::current()->record_failure("CodeCache is full");
4690         return;
4691       }
4692     }
4693   %}
4694 
4695   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4696     MacroAssembler _masm(&cbuf);
4697     address call = __ ic_call((address)$meth$$method);
4698     if (call == NULL) {
4699       ciEnv::current()->record_failure("CodeCache is full");
4700       return;
4701     }
4702   %}
4703 
4704   enc_class aarch64_enc_call_epilog() %{
4705     MacroAssembler _masm(&cbuf);
4706     if (VerifyStackAtCalls) {
4707       // Check that stack depth is unchanged: find majik cookie on stack
4708       __ call_Unimplemented();
4709     }
4710   %}
4711 
4712   enc_class aarch64_enc_java_to_runtime(method meth) %{
4713     MacroAssembler _masm(&cbuf);
4714 
    // Some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. If so, we can call them using a br (they
    // will be in a reachable segment); otherwise we have to use a blrt,
    // which loads the absolute address into a register.
4719     address entry = (address)$meth$$method;
4720     CodeBlob *cb = CodeCache::find_blob(entry);
4721     if (cb) {
4722       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
4723       if (call == NULL) {
4724         ciEnv::current()->record_failure("CodeCache is full");
4725         return;
4726       }
4727     } else {
4728       int gpcnt;
4729       int fpcnt;
4730       int rtype;
4731       getCallInfo(tf(), gpcnt, fpcnt, rtype);
4732       Label retaddr;
4733       __ adr(rscratch2, retaddr);
4734       __ lea(rscratch1, RuntimeAddress(entry));
4735       // Leave a breadcrumb for JavaThread::pd_last_frame().
4736       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
4737       __ blrt(rscratch1, gpcnt, fpcnt, rtype);
4738       __ bind(retaddr);
4739       __ add(sp, sp, 2 * wordSize);
4740     }
4741   %}
4742 
4743   enc_class aarch64_enc_rethrow() %{
4744     MacroAssembler _masm(&cbuf);
4745     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
4746   %}
4747 
4748   enc_class aarch64_enc_ret() %{
4749     MacroAssembler _masm(&cbuf);
4750     __ ret(lr);
4751   %}
4752 
4753   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4754     MacroAssembler _masm(&cbuf);
4755     Register target_reg = as_Register($jump_target$$reg);
4756     __ br(target_reg);
4757   %}
4758 
4759   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4760     MacroAssembler _masm(&cbuf);
4761     Register target_reg = as_Register($jump_target$$reg);
4762     // exception oop should be in r0
4763     // ret addr has been popped into lr
4764     // callee expects it in r3
4765     __ mov(r3, lr);
4766     __ br(target_reg);
4767   %}
4768 
4769   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4770     MacroAssembler _masm(&cbuf);
4771     Register oop = as_Register($object$$reg);
4772     Register box = as_Register($box$$reg);
4773     Register disp_hdr = as_Register($tmp$$reg);
4774     Register tmp = as_Register($tmp2$$reg);
4775     Label cont;
4776     Label object_has_monitor;
4777     Label cas_failed;
4778 
4779     assert_different_registers(oop, box, tmp, disp_hdr);
4780 
4781     // Load markOop from object into displaced_header.
4782     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
4783 
4784     // Always do locking in runtime.
4785     if (EmitSync & 0x01) {
      __ cmp(oop, zr); // oop is non-null here => sets NE, signalling failure to the caller
4787       return;
4788     }
4789 
4790     if (UseBiasedLocking && !UseOptoBiasInlining) {
4791       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
4792     }
4793 
4794     // Handle existing monitor
4795     if ((EmitSync & 0x02) == 0) {
      // We can use AArch64's bit test and branch here, but
      // markOopDesc does not define a bit index, just the bit value,
      // so assert in case the bit position changes.
4799 #     define __monitor_value_log2 1
4800       assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
4801       __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
4802 #     undef __monitor_value_log2
4803     }
4804 
4805     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
4806     __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
4807 
4808     // Load Compare Value application register.
4809 
4810     // Initialize the box. (Must happen before we update the object mark!)
4811     __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4812 
4813     // Compare object markOop with mark and if equal exchange scratch1
4814     // with object markOop.
4815     {
4816       Label retry_load;
4817       __ bind(retry_load);
4818       __ ldaxr(tmp, oop);
4819       __ cmp(tmp, disp_hdr);
4820       __ br(Assembler::NE, cas_failed);
4821       // use stlxr to ensure update is immediately visible
4822       __ stlxr(tmp, box, oop);
4823       __ cbzw(tmp, cont);
4824       __ b(retry_load);
4825     }
4826 
4827     // Formerly:
4828     // __ cmpxchgptr(/*oldv=*/disp_hdr,
4829     //               /*newv=*/box,
4830     //               /*addr=*/oop,
4831     //               /*tmp=*/tmp,
4832     //               cont,
4833     //               /*fail*/NULL);
4834 
4835     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4836 
    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.
4839 
4840     __ bind(cas_failed);
4841     // We did not see an unlocked object so try the fast recursive case.
4842 
4843     // Check if the owner is self by comparing the value in the
4844     // markOop of object (disp_hdr) with the stack pointer.
4845     __ mov(rscratch1, sp);
4846     __ sub(disp_hdr, disp_hdr, rscratch1);
4847     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the condition is true (the masked difference is zero) we will
    // continue at cont, and hence we can store 0 as the displaced
    // header in the box, which indicates that it is a recursive lock.
4850     __ ands(tmp/*==0?*/, disp_hdr, tmp);
4851     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4852 
4853     // Handle existing monitor.
4854     if ((EmitSync & 0x02) == 0) {
4855       __ b(cont);
4856 
4857       __ bind(object_has_monitor);
4858       // The object's monitor m is unlocked iff m->owner == NULL,
4859       // otherwise m->owner may contain a thread or a stack address.
4860       //
4861       // Try to CAS m->owner from NULL to current thread.
4862       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
4863       __ mov(disp_hdr, zr);
4864 
4865       {
4866         Label retry_load, fail;
4867         __ bind(retry_load);
4868         __ ldaxr(rscratch1, tmp);
4869         __ cmp(disp_hdr, rscratch1);
4870         __ br(Assembler::NE, fail);
4871         // use stlxr to ensure update is immediately visible
4872         __ stlxr(rscratch1, rthread, tmp);
4873         __ cbnzw(rscratch1, retry_load);
4874         __ bind(fail);
4875       }
4876 
4877       // Label next;
4878       // __ cmpxchgptr(/*oldv=*/disp_hdr,
4879       //               /*newv=*/rthread,
4880       //               /*addr=*/tmp,
4881       //               /*tmp=*/rscratch1,
4882       //               /*succeed*/next,
4883       //               /*fail*/NULL);
4884       // __ bind(next);
4885 
4886       // store a non-null value into the box.
4887       __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4888 
4889       // PPC port checks the following invariants
4890       // #ifdef ASSERT
4891       // bne(flag, cont);
4892       // We have acquired the monitor, check some invariants.
4893       // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
4894       // Invariant 1: _recursions should be 0.
4895       // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
4896       // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
4897       //                        "monitor->_recursions should be 0", -1);
4898       // Invariant 2: OwnerIsThread shouldn't be 0.
4899       // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
4900       //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
4901       //                           "monitor->OwnerIsThread shouldn't be 0", -1);
4902       // #endif
4903     }
4904 
4905     __ bind(cont);
4906     // flag == EQ indicates success
4907     // flag == NE indicates failure
4908 
4909   %}
4910 
4911   // TODO
4912   // reimplement this with custom cmpxchgptr code
4913   // which avoids some of the unnecessary branching
4914   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4915     MacroAssembler _masm(&cbuf);
4916     Register oop = as_Register($object$$reg);
4917     Register box = as_Register($box$$reg);
4918     Register disp_hdr = as_Register($tmp$$reg);
4919     Register tmp = as_Register($tmp2$$reg);
4920     Label cont;
4921     Label object_has_monitor;
4922     Label cas_failed;
4923 
4924     assert_different_registers(oop, box, tmp, disp_hdr);
4925 
4926     // Always do locking in runtime.
4927     if (EmitSync & 0x01) {
4928       __ cmp(oop, zr); // Oop can't be 0 here => always false.
4929       return;
4930     }
4931 
4932     if (UseBiasedLocking && !UseOptoBiasInlining) {
4933       __ biased_locking_exit(oop, tmp, cont);
4934     }
4935 
4936     // Find the lock address and load the displaced header from the stack.
4937     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4938 
4939     // If the displaced header is 0, we have a recursive unlock.
4940     __ cmp(disp_hdr, zr);
4941     __ br(Assembler::EQ, cont);
4942 
4943 
4944     // Handle existing monitor.
4945     if ((EmitSync & 0x02) == 0) {
4946       __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
4947       __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
4948     }
4949 
    // Check if it is still a lightweight lock; this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.
4953 
4954       {
4955         Label retry_load;
4956         __ bind(retry_load);
4957         __ ldxr(tmp, oop);
4958         __ cmp(box, tmp);
4959         __ br(Assembler::NE, cas_failed);
4960         // use stlxr to ensure update is immediately visible
4961         __ stlxr(tmp, disp_hdr, oop);
4962         __ cbzw(tmp, cont);
4963         __ b(retry_load);
4964       }
4965 
4966     // __ cmpxchgptr(/*compare_value=*/box,
4967     //               /*exchange_value=*/disp_hdr,
4968     //               /*where=*/oop,
4969     //               /*result=*/tmp,
4970     //               cont,
4971     //               /*cas_failed*/NULL);
4972     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4973 
4974     __ bind(cas_failed);
4975 
4976     // Handle existing monitor.
4977     if ((EmitSync & 0x02) == 0) {
4978       __ b(cont);
4979 
4980       __ bind(object_has_monitor);
4981       __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
4982       __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
4983       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
4984       __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
4985       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
4986       __ cmp(rscratch1, zr);
4987       __ br(Assembler::NE, cont);
4988 
4989       __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
4990       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
4991       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
4992       __ cmp(rscratch1, zr);
4993       __ cbnz(rscratch1, cont);
4994       // need a release store here
4995       __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
4996       __ stlr(rscratch1, tmp); // rscratch1 is zero
4997     }
4998 
4999     __ bind(cont);
5000     // flag == EQ indicates success
5001     // flag == NE indicates failure
5002   %}
5003 
5004 %}
5005 
5006 //----------FRAME--------------------------------------------------------------
5007 // Definition of frame structure and management information.
5008 //
5009 //  S T A C K   L A Y O U T    Allocators stack-slot number
5010 //                             |   (to get allocators register number
5011 //  G  Owned by    |        |  v    add OptoReg::stack0())
5012 //  r   CALLER     |        |
5013 //  o     |        +--------+      pad to even-align allocators stack-slot
5014 //  w     V        |  pad0  |        numbers; owned by CALLER
5015 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5016 //  h     ^        |   in   |  5
5017 //        |        |  args  |  4   Holes in incoming args owned by SELF
5018 //  |     |        |        |  3
5019 //  |     |        +--------+
5020 //  V     |        | old out|      Empty on Intel, window on Sparc
5021 //        |    old |preserve|      Must be even aligned.
5022 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5023 //        |        |   in   |  3   area for Intel ret address
5024 //     Owned by    |preserve|      Empty on Sparc.
5025 //       SELF      +--------+
5026 //        |        |  pad2  |  2   pad to align old SP
5027 //        |        +--------+  1
5028 //        |        | locks  |  0
5029 //        |        +--------+----> OptoReg::stack0(), even aligned
5030 //        |        |  pad1  | 11   pad to align new SP
5031 //        |        +--------+
5032 //        |        |        | 10
5033 //        |        | spills |  9   spills
5034 //        V        |        |  8   (pad0 slot for callee)
5035 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5036 //        ^        |  out   |  7
5037 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5038 //     Owned by    +--------+
5039 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5040 //        |    new |preserve|      Must be even-aligned.
5041 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5042 //        |        |        |
5043 //
5044 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5045 //         known from SELF's arguments and the Java calling convention.
5046 //         Region 6-7 is determined per call site.
5047 // Note 2: If the calling convention leaves holes in the incoming argument
5048 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5050 //         incoming area, as the Java calling convention is completely under
5051 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5053 //         varargs C calling conventions.
5054 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5055 //         even aligned with pad0 as needed.
5056 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5057 //           (the latter is true on Intel but is it false on AArch64?)
5058 //         region 6-11 is even aligned; it may be padded out more so that
5059 //         the region from SP to FP meets the minimum stack alignment.
5060 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5061 //         alignment.  Region 11, pad1, may be dynamically extended so that
5062 //         SP meets the minimum alignment.
5063 
5064 frame %{
5065   // What direction does stack grow in (assumed to be same for C & Java)
5066   stack_direction(TOWARDS_LOW);
5067 
5068   // These three registers define part of the calling convention
5069   // between compiled code and the interpreter.
5070 
5071   // Inline Cache Register or methodOop for I2C.
5072   inline_cache_reg(R12);
5073 
5074   // Method Oop Register when calling interpreter.
5075   interpreter_method_oop_reg(R12);
5076 
5077   // Number of stack slots consumed by locking an object
5078   sync_stack_slots(2);
5079 
5080   // Compiled code's Frame Pointer
5081   frame_pointer(R31);
5082 
5083   // Interpreter stores its frame pointer in a register which is
5084   // stored to the stack by I2CAdaptors.
5085   // I2CAdaptors convert from interpreted java to compiled java.
5086   interpreter_frame_pointer(R29);
5087 
5088   // Stack alignment requirement
5089   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
5090 
5091   // Number of stack slots between incoming argument block and the start of
5092   // a new frame.  The PROLOG must add this many slots to the stack.  The
5093   // EPILOG must remove this many slots. aarch64 needs two slots for
5094   // return address and fp.
5095   // TODO think this is correct but check
5096   in_preserve_stack_slots(4);
5097 
5098   // Number of outgoing stack slots killed above the out_preserve_stack_slots
5099   // for calls to C.  Supports the var-args backing area for register parms.
5100   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
5101 
5102   // The after-PROLOG location of the return address.  Location of
5103   // return address specifies a type (REG or STACK) and a number
5104   // representing the register number (i.e. - use a register name) or
5105   // stack slot.
5106   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
5107   // Otherwise, it is above the locks and verification slot and alignment word
5108   // TODO this may well be correct but need to check why that - 2 is there
5109   // ppc port uses 0 but we definitely need to allow for fixed_slots
5110   // which folds in the space used for monitors
5111   return_addr(STACK - 2 +
5112               round_to((Compile::current()->in_preserve_stack_slots() +
5113                         Compile::current()->fixed_slots()),
5114                        stack_alignment_in_slots()));
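  // Worked example (assuming StackAlignmentInBytes == 16, i.e. 4 ints
  // per alignment unit, and no fixed slots): round_to(4 + 0, 4) == 4,
  // so the return address lives at stack slot STACK - 2 + 4 == STACK + 2.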
5115 
5116   // Body of function which returns an integer array locating
5117   // arguments either in registers or in stack slots.  Passed an array
5118   // of ideal registers called "sig" and a "length" count.  Stack-slot
5119   // offsets are based on outgoing arguments, i.e. a CALLER setting up
5120   // arguments for a CALLEE.  Incoming stack arguments are
5121   // automatically biased by the preserve_stack_slots field above.
5122 
5123   calling_convention
5124   %{
    // No difference between incoming/outgoing, so just pass false.
5126     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
5127   %}
5128 
5129   c_calling_convention
5130   %{
5131     // This is obviously always outgoing
5132     (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
5133   %}
5134 
5135   // Location of compiled Java return values.  Same as C for now.
5136   return_value
5137   %{
5138     // TODO do we allow ideal_reg == Op_RegN???
5139     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
5140            "only return normal values");
5141 
5142     static const int lo[Op_RegL + 1] = { // enum name
5143       0,                                 // Op_Node
5144       0,                                 // Op_Set
5145       R0_num,                            // Op_RegN
5146       R0_num,                            // Op_RegI
5147       R0_num,                            // Op_RegP
5148       V0_num,                            // Op_RegF
5149       V0_num,                            // Op_RegD
5150       R0_num                             // Op_RegL
5151     };
5152 
5153     static const int hi[Op_RegL + 1] = { // enum name
5154       0,                                 // Op_Node
5155       0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
5157       OptoReg::Bad,                      // Op_RegI
5158       R0_H_num,                          // Op_RegP
5159       OptoReg::Bad,                      // Op_RegF
5160       V0_H_num,                          // Op_RegD
5161       R0_H_num                           // Op_RegL
5162     };
5163 
5164     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
5165   %}
5166 %}
5167 
5168 //----------ATTRIBUTES---------------------------------------------------------
5169 //----------Operand Attributes-------------------------------------------------
5170 op_attrib op_cost(1);        // Required cost attribute
5171 
5172 //----------Instruction Attributes---------------------------------------------
5173 ins_attrib ins_cost(INSN_COST); // Required cost attribute
5174 ins_attrib ins_size(32);        // Required size attribute (in bits)
5175 ins_attrib ins_short_branch(0); // Required flag: is this instruction
5176                                 // a non-matching short branch variant
5177                                 // of some long branch?
5178 ins_attrib ins_alignment(4);    // Required alignment attribute (must
5179                                 // be a power of 2) specifies the
5180                                 // alignment that some part of the
5181                                 // instruction (not necessarily the
5182                                 // start) requires.  If > 1, a
5183                                 // compute_padding() function must be
5184                                 // provided for the instruction
5185 
5186 //----------OPERANDS-----------------------------------------------------------
5187 // Operand definitions must precede instruction definitions for correct parsing
5188 // in the ADLC because operands constitute user defined types which are used in
5189 // instruction definitions.
5190 
5191 //----------Simple Operands----------------------------------------------------
5192 
5193 // Integer operands 32 bit
5194 // 32 bit immediate
5195 operand immI()
5196 %{
5197   match(ConI);
5198 
5199   op_cost(0);
5200   format %{ %}
5201   interface(CONST_INTER);
5202 %}
5203 
5204 // 32 bit zero
5205 operand immI0()
5206 %{
5207   predicate(n->get_int() == 0);
5208   match(ConI);
5209 
5210   op_cost(0);
5211   format %{ %}
5212   interface(CONST_INTER);
5213 %}
5214 
5215 // 32 bit unit increment
5216 operand immI_1()
5217 %{
5218   predicate(n->get_int() == 1);
5219   match(ConI);
5220 
5221   op_cost(0);
5222   format %{ %}
5223   interface(CONST_INTER);
5224 %}
5225 
5226 // 32 bit unit decrement
5227 operand immI_M1()
5228 %{
5229   predicate(n->get_int() == -1);
5230   match(ConI);
5231 
5232   op_cost(0);
5233   format %{ %}
5234   interface(CONST_INTER);
5235 %}
5236 
5237 operand immI_le_4()
5238 %{
5239   predicate(n->get_int() <= 4);
5240   match(ConI);
5241 
5242   op_cost(0);
5243   format %{ %}
5244   interface(CONST_INTER);
5245 %}
5246 
5247 operand immI_31()
5248 %{
5249   predicate(n->get_int() == 31);
5250   match(ConI);
5251 
5252   op_cost(0);
5253   format %{ %}
5254   interface(CONST_INTER);
5255 %}
5256 
5257 operand immI_8()
5258 %{
5259   predicate(n->get_int() == 8);
5260   match(ConI);
5261 
5262   op_cost(0);
5263   format %{ %}
5264   interface(CONST_INTER);
5265 %}
5266 
5267 operand immI_16()
5268 %{
5269   predicate(n->get_int() == 16);
5270   match(ConI);
5271 
5272   op_cost(0);
5273   format %{ %}
5274   interface(CONST_INTER);
5275 %}
5276 
5277 operand immI_24()
5278 %{
5279   predicate(n->get_int() == 24);
5280   match(ConI);
5281 
5282   op_cost(0);
5283   format %{ %}
5284   interface(CONST_INTER);
5285 %}
5286 
5287 operand immI_32()
5288 %{
5289   predicate(n->get_int() == 32);
5290   match(ConI);
5291 
5292   op_cost(0);
5293   format %{ %}
5294   interface(CONST_INTER);
5295 %}
5296 
5297 operand immI_48()
5298 %{
5299   predicate(n->get_int() == 48);
5300   match(ConI);
5301 
5302   op_cost(0);
5303   format %{ %}
5304   interface(CONST_INTER);
5305 %}
5306 
5307 operand immI_56()
5308 %{
5309   predicate(n->get_int() == 56);
5310   match(ConI);
5311 
5312   op_cost(0);
5313   format %{ %}
5314   interface(CONST_INTER);
5315 %}
5316 
5317 operand immI_64()
5318 %{
5319   predicate(n->get_int() == 64);
5320   match(ConI);
5321 
5322   op_cost(0);
5323   format %{ %}
5324   interface(CONST_INTER);
5325 %}
5326 
5327 operand immI_255()
5328 %{
5329   predicate(n->get_int() == 255);
5330   match(ConI);
5331 
5332   op_cost(0);
5333   format %{ %}
5334   interface(CONST_INTER);
5335 %}
5336 
5337 operand immI_65535()
5338 %{
5339   predicate(n->get_int() == 65535);
5340   match(ConI);
5341 
5342   op_cost(0);
5343   format %{ %}
5344   interface(CONST_INTER);
5345 %}
5346 
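// Note: despite the immL_ prefix, the next two operands match 32-bit
// ConI constants, not ConL.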
5347 operand immL_63()
5348 %{
5349   predicate(n->get_int() == 63);
5350   match(ConI);
5351 
5352   op_cost(0);
5353   format %{ %}
5354   interface(CONST_INTER);
5355 %}
5356 
5357 operand immL_255()
5358 %{
5359   predicate(n->get_int() == 255);
5360   match(ConI);
5361 
5362   op_cost(0);
5363   format %{ %}
5364   interface(CONST_INTER);
5365 %}
5366 
5367 operand immL_65535()
5368 %{
5369   predicate(n->get_long() == 65535L);
5370   match(ConL);
5371 
5372   op_cost(0);
5373   format %{ %}
5374   interface(CONST_INTER);
5375 %}
5376 
5377 operand immL_4294967295()
5378 %{
5379   predicate(n->get_long() == 4294967295L);
5380   match(ConL);
5381 
5382   op_cost(0);
5383   format %{ %}
5384   interface(CONST_INTER);
5385 %}
5386 
5387 operand immL_bitmask()
5388 %{
5389   predicate(((n->get_long() & 0xc000000000000000l) == 0)
5390             && is_power_of_2(n->get_long() + 1));
5391   match(ConL);
5392 
5393   op_cost(0);
5394   format %{ %}
5395   interface(CONST_INTER);
5396 %}
5397 
5398 operand immI_bitmask()
5399 %{
5400   predicate(((n->get_int() & 0xc0000000) == 0)
5401             && is_power_of_2(n->get_int() + 1));
5402   match(ConI);
5403 
5404   op_cost(0);
5405   format %{ %}
5406   interface(CONST_INTER);
5407 %}
5408 
5409 // Scale values for scaled offset addressing modes (up to long but not quad)
5410 operand immIScale()
5411 %{
5412   predicate(0 <= n->get_int() && (n->get_int() <= 3));
5413   match(ConI);
5414 
5415   op_cost(0);
5416   format %{ %}
5417   interface(CONST_INTER);
5418 %}
5419 
5420 // 26 bit signed offset -- for pc-relative branches
5421 operand immI26()
5422 %{
5423   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
5424   match(ConI);
5425 
5426   op_cost(0);
5427   format %{ %}
5428   interface(CONST_INTER);
5429 %}
5430 
5431 // 19 bit signed offset -- for pc-relative loads
5432 operand immI19()
5433 %{
5434   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
5435   match(ConI);
5436 
5437   op_cost(0);
5438   format %{ %}
5439   interface(CONST_INTER);
5440 %}
5441 
5442 // 12 bit unsigned offset -- for base plus immediate loads
5443 operand immIU12()
5444 %{
5445   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
5446   match(ConI);
5447 
5448   op_cost(0);
5449   format %{ %}
5450   interface(CONST_INTER);
5451 %}
5452 
5453 operand immLU12()
5454 %{
5455   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
5456   match(ConL);
5457 
5458   op_cost(0);
5459   format %{ %}
5460   interface(CONST_INTER);
5461 %}
5462 
5463 // Offset for scaled or unscaled immediate loads and stores
5464 operand immIOffset()
5465 %{
5466   predicate(Address::offset_ok_for_immed(n->get_int()));
5467   match(ConI);
5468 
5469   op_cost(0);
5470   format %{ %}
5471   interface(CONST_INTER);
5472 %}
5473 
5474 operand immLoffset()
5475 %{
5476   predicate(Address::offset_ok_for_immed(n->get_long()));
5477   match(ConL);
5478 
5479   op_cost(0);
5480   format %{ %}
5481   interface(CONST_INTER);
5482 %}
5483 
5484 // 32 bit integer valid for add sub immediate
5485 operand immIAddSub()
5486 %{
5487   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
5488   match(ConI);
5489   op_cost(0);
5490   format %{ %}
5491   interface(CONST_INTER);
5492 %}
5493 
5494 // 32 bit unsigned integer valid for logical immediate
5495 // TODO -- check this is right when e.g the mask is 0x80000000
5496 operand immILog()
5497 %{
5498   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
5499   match(ConI);
5500 
5501   op_cost(0);
5502   format %{ %}
5503   interface(CONST_INTER);
5504 %}
5505 
5506 // Integer operands 64 bit
5507 // 64 bit immediate
5508 operand immL()
5509 %{
5510   match(ConL);
5511 
5512   op_cost(0);
5513   format %{ %}
5514   interface(CONST_INTER);
5515 %}
5516 
5517 // 64 bit zero
5518 operand immL0()
5519 %{
5520   predicate(n->get_long() == 0);
5521   match(ConL);
5522 
5523   op_cost(0);
5524   format %{ %}
5525   interface(CONST_INTER);
5526 %}
5527 
5528 // 64 bit unit increment
5529 operand immL_1()
5530 %{
5531   predicate(n->get_long() == 1);
5532   match(ConL);
5533 
5534   op_cost(0);
5535   format %{ %}
5536   interface(CONST_INTER);
5537 %}
5538 
5539 // 64 bit unit decrement
5540 operand immL_M1()
5541 %{
5542   predicate(n->get_long() == -1);
5543   match(ConL);
5544 
5545   op_cost(0);
5546   format %{ %}
5547   interface(CONST_INTER);
5548 %}
5549 
// offset of the last Java pc slot within the thread anchor (matched as a long constant)
5551 
5552 operand immL_pc_off()
5553 %{
5554   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
5555                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
5556   match(ConL);
5557 
5558   op_cost(0);
5559   format %{ %}
5560   interface(CONST_INTER);
5561 %}
5562 
5563 // 64 bit integer valid for add sub immediate
5564 operand immLAddSub()
5565 %{
5566   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
5567   match(ConL);
5568   op_cost(0);
5569   format %{ %}
5570   interface(CONST_INTER);
5571 %}
5572 
5573 // 64 bit integer valid for logical immediate
5574 operand immLLog()
5575 %{
5576   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
5577   match(ConL);
5578   op_cost(0);
5579   format %{ %}
5580   interface(CONST_INTER);
5581 %}
5582 
5583 // Long Immediate: low 32-bit mask
5584 operand immL_32bits()
5585 %{
5586   predicate(n->get_long() == 0xFFFFFFFFL);
5587   match(ConL);
5588   op_cost(0);
5589   format %{ %}
5590   interface(CONST_INTER);
5591 %}
5592 
5593 // Pointer operands
5594 // Pointer Immediate
5595 operand immP()
5596 %{
5597   match(ConP);
5598 
5599   op_cost(0);
5600   format %{ %}
5601   interface(CONST_INTER);
5602 %}
5603 
5604 // NULL Pointer Immediate
5605 operand immP0()
5606 %{
5607   predicate(n->get_ptr() == 0);
5608   match(ConP);
5609 
5610   op_cost(0);
5611   format %{ %}
5612   interface(CONST_INTER);
5613 %}
5614 
5615 // Pointer Immediate One
5616 // this is used in object initialization (initial object header)
5617 operand immP_1()
5618 %{
5619   predicate(n->get_ptr() == 1);
5620   match(ConP);
5621 
5622   op_cost(0);
5623   format %{ %}
5624   interface(CONST_INTER);
5625 %}
5626 
5627 // Polling Page Pointer Immediate
5628 operand immPollPage()
5629 %{
5630   predicate((address)n->get_ptr() == os::get_polling_page());
5631   match(ConP);
5632 
5633   op_cost(0);
5634   format %{ %}
5635   interface(CONST_INTER);
5636 %}
5637 
5638 // Card Table Byte Map Base
5639 operand immByteMapBase()
5640 %{
5641   // Get base of card map
5642   predicate((jbyte*)n->get_ptr() ==
5643         ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
5644   match(ConP);
5645 
5646   op_cost(0);
5647   format %{ %}
5648   interface(CONST_INTER);
5649 %}
5650 
5651 // Pointer Immediate Minus One
5652 // this is used when we want to write the current PC to the thread anchor
5653 operand immP_M1()
5654 %{
5655   predicate(n->get_ptr() == -1);
5656   match(ConP);
5657 
5658   op_cost(0);
5659   format %{ %}
5660   interface(CONST_INTER);
5661 %}
5662 
5663 // Pointer Immediate Minus Two
5664 // this is used when we want to write the current PC to the thread anchor
5665 operand immP_M2()
5666 %{
5667   predicate(n->get_ptr() == -2);
5668   match(ConP);
5669 
5670   op_cost(0);
5671   format %{ %}
5672   interface(CONST_INTER);
5673 %}
5674 
5675 // Float and Double operands
5676 // Double Immediate
5677 operand immD()
5678 %{
5679   match(ConD);
5680   op_cost(0);
5681   format %{ %}
5682   interface(CONST_INTER);
5683 %}
5684 
5685 // Double Immediate: +0.0d
5686 operand immD0()
5687 %{
5688   predicate(jlong_cast(n->getd()) == 0);
5689   match(ConD);
5690 
5691   op_cost(0);
5692   format %{ %}
5693   interface(CONST_INTER);
5694 %}
5695 
// Double Immediate: encodable in an fmov immediate field
5697 operand immDPacked()
5698 %{
5699   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
5700   match(ConD);
5701   op_cost(0);
5702   format %{ %}
5703   interface(CONST_INTER);
5704 %}
5705 
5706 // Float Immediate
5707 operand immF()
5708 %{
5709   match(ConF);
5710   op_cost(0);
5711   format %{ %}
5712   interface(CONST_INTER);
5713 %}
5714 
5715 // Float Immediate: +0.0f.
5716 operand immF0()
5717 %{
5718   predicate(jint_cast(n->getf()) == 0);
5719   match(ConF);
5720 
5721   op_cost(0);
5722   format %{ %}
5723   interface(CONST_INTER);
5724 %}
5725 
5726 //
5727 operand immFPacked()
5728 %{
5729   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
5730   match(ConF);
5731   op_cost(0);
5732   format %{ %}
5733   interface(CONST_INTER);
5734 %}
5735 
5736 // Narrow pointer operands
5737 // Narrow Pointer Immediate
5738 operand immN()
5739 %{
5740   match(ConN);
5741 
5742   op_cost(0);
5743   format %{ %}
5744   interface(CONST_INTER);
5745 %}
5746 
5747 // Narrow NULL Pointer Immediate
5748 operand immN0()
5749 %{
5750   predicate(n->get_narrowcon() == 0);
5751   match(ConN);
5752 
5753   op_cost(0);
5754   format %{ %}
5755   interface(CONST_INTER);
5756 %}
5757 
5758 operand immNKlass()
5759 %{
5760   match(ConNKlass);
5761 
5762   op_cost(0);
5763   format %{ %}
5764   interface(CONST_INTER);
5765 %}
5766 
5767 // Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
5769 operand iRegI()
5770 %{
5771   constraint(ALLOC_IN_RC(any_reg32));
5772   match(RegI);
5773   match(iRegINoSp);
5774   op_cost(0);
5775   format %{ %}
5776   interface(REG_INTER);
5777 %}
5778 
5779 // Integer 32 bit Register not Special
5780 operand iRegINoSp()
5781 %{
5782   constraint(ALLOC_IN_RC(no_special_reg32));
5783   match(RegI);
5784   op_cost(0);
5785   format %{ %}
5786   interface(REG_INTER);
5787 %}
5788 
5789 // Integer 64 bit Register Operands
5790 // Integer 64 bit Register (includes SP)
5791 operand iRegL()
5792 %{
5793   constraint(ALLOC_IN_RC(any_reg));
5794   match(RegL);
5795   match(iRegLNoSp);
5796   op_cost(0);
5797   format %{ %}
5798   interface(REG_INTER);
5799 %}
5800 
5801 // Integer 64 bit Register not Special
5802 operand iRegLNoSp()
5803 %{
5804   constraint(ALLOC_IN_RC(no_special_reg));
5805   match(RegL);
5806   format %{ %}
5807   interface(REG_INTER);
5808 %}
5809 
5810 // Pointer Register Operands
5811 // Pointer Register
5812 operand iRegP()
5813 %{
5814   constraint(ALLOC_IN_RC(ptr_reg));
5815   match(RegP);
5816   match(iRegPNoSp);
5817   match(iRegP_R0);
5818   //match(iRegP_R2);
5819   //match(iRegP_R4);
5820   //match(iRegP_R5);
5821   match(thread_RegP);
5822   op_cost(0);
5823   format %{ %}
5824   interface(REG_INTER);
5825 %}
5826 
5827 // Pointer 64 bit Register not Special
5828 operand iRegPNoSp()
5829 %{
5830   constraint(ALLOC_IN_RC(no_special_ptr_reg));
5831   match(RegP);
5832   // match(iRegP);
5833   // match(iRegP_R0);
5834   // match(iRegP_R2);
5835   // match(iRegP_R4);
5836   // match(iRegP_R5);
5837   // match(thread_RegP);
5838   op_cost(0);
5839   format %{ %}
5840   interface(REG_INTER);
5841 %}
5842 
5843 // Pointer 64 bit Register R0 only
5844 operand iRegP_R0()
5845 %{
5846   constraint(ALLOC_IN_RC(r0_reg));
5847   match(RegP);
5848   // match(iRegP);
5849   match(iRegPNoSp);
5850   op_cost(0);
5851   format %{ %}
5852   interface(REG_INTER);
5853 %}
5854 
5855 // Pointer 64 bit Register R1 only
5856 operand iRegP_R1()
5857 %{
5858   constraint(ALLOC_IN_RC(r1_reg));
5859   match(RegP);
5860   // match(iRegP);
5861   match(iRegPNoSp);
5862   op_cost(0);
5863   format %{ %}
5864   interface(REG_INTER);
5865 %}
5866 
5867 // Pointer 64 bit Register R2 only
5868 operand iRegP_R2()
5869 %{
5870   constraint(ALLOC_IN_RC(r2_reg));
5871   match(RegP);
5872   // match(iRegP);
5873   match(iRegPNoSp);
5874   op_cost(0);
5875   format %{ %}
5876   interface(REG_INTER);
5877 %}
5878 
5879 // Pointer 64 bit Register R3 only
5880 operand iRegP_R3()
5881 %{
5882   constraint(ALLOC_IN_RC(r3_reg));
5883   match(RegP);
5884   // match(iRegP);
5885   match(iRegPNoSp);
5886   op_cost(0);
5887   format %{ %}
5888   interface(REG_INTER);
5889 %}
5890 
5891 // Pointer 64 bit Register R4 only
5892 operand iRegP_R4()
5893 %{
5894   constraint(ALLOC_IN_RC(r4_reg));
5895   match(RegP);
5896   // match(iRegP);
5897   match(iRegPNoSp);
5898   op_cost(0);
5899   format %{ %}
5900   interface(REG_INTER);
5901 %}
5902 
5903 // Pointer 64 bit Register R5 only
5904 operand iRegP_R5()
5905 %{
5906   constraint(ALLOC_IN_RC(r5_reg));
5907   match(RegP);
5908   // match(iRegP);
5909   match(iRegPNoSp);
5910   op_cost(0);
5911   format %{ %}
5912   interface(REG_INTER);
5913 %}
5914 
5915 // Pointer 64 bit Register R10 only
5916 operand iRegP_R10()
5917 %{
5918   constraint(ALLOC_IN_RC(r10_reg));
5919   match(RegP);
5920   // match(iRegP);
5921   match(iRegPNoSp);
5922   op_cost(0);
5923   format %{ %}
5924   interface(REG_INTER);
5925 %}
5926 
5927 // Long 64 bit Register R11 only
5928 operand iRegL_R11()
5929 %{
5930   constraint(ALLOC_IN_RC(r11_reg));
5931   match(RegL);
5932   match(iRegLNoSp);
5933   op_cost(0);
5934   format %{ %}
5935   interface(REG_INTER);
5936 %}
5937 
5938 // Pointer 64 bit Register FP only
5939 operand iRegP_FP()
5940 %{
5941   constraint(ALLOC_IN_RC(fp_reg));
5942   match(RegP);
5943   // match(iRegP);
5944   op_cost(0);
5945   format %{ %}
5946   interface(REG_INTER);
5947 %}
5948 
5949 // Register R0 only
5950 operand iRegI_R0()
5951 %{
5952   constraint(ALLOC_IN_RC(int_r0_reg));
5953   match(RegI);
5954   match(iRegINoSp);
5955   op_cost(0);
5956   format %{ %}
5957   interface(REG_INTER);
5958 %}
5959 
5960 // Register R2 only
5961 operand iRegI_R2()
5962 %{
5963   constraint(ALLOC_IN_RC(int_r2_reg));
5964   match(RegI);
5965   match(iRegINoSp);
5966   op_cost(0);
5967   format %{ %}
5968   interface(REG_INTER);
5969 %}
5970 
5971 // Register R3 only
5972 operand iRegI_R3()
5973 %{
5974   constraint(ALLOC_IN_RC(int_r3_reg));
5975   match(RegI);
5976   match(iRegINoSp);
5977   op_cost(0);
5978   format %{ %}
5979   interface(REG_INTER);
5980 %}
5981 
5982 
// Register R4 only
5984 operand iRegI_R4()
5985 %{
5986   constraint(ALLOC_IN_RC(int_r4_reg));
5987   match(RegI);
5988   match(iRegINoSp);
5989   op_cost(0);
5990   format %{ %}
5991   interface(REG_INTER);
5992 %}
5993 
5994 
5995 // Pointer Register Operands
5996 // Narrow Pointer Register
5997 operand iRegN()
5998 %{
5999   constraint(ALLOC_IN_RC(any_reg32));
6000   match(RegN);
6001   match(iRegNNoSp);
6002   op_cost(0);
6003   format %{ %}
6004   interface(REG_INTER);
6005 %}
6006 
// Narrow Pointer Register not Special
6008 operand iRegNNoSp()
6009 %{
6010   constraint(ALLOC_IN_RC(no_special_reg32));
6011   match(RegN);
6012   op_cost(0);
6013   format %{ %}
6014   interface(REG_INTER);
6015 %}
6016 
6017 // heap base register -- used for encoding immN0
6018 
6019 operand iRegIHeapbase()
6020 %{
6021   constraint(ALLOC_IN_RC(heapbase_reg));
6022   match(RegI);
6023   op_cost(0);
6024   format %{ %}
6025   interface(REG_INTER);
6026 %}
6027 
6028 // Float Register
6029 // Float register operands
6030 operand vRegF()
6031 %{
6032   constraint(ALLOC_IN_RC(float_reg));
6033   match(RegF);
6034 
6035   op_cost(0);
6036   format %{ %}
6037   interface(REG_INTER);
6038 %}
6039 
6040 // Double Register
6041 // Double register operands
6042 operand vRegD()
6043 %{
6044   constraint(ALLOC_IN_RC(double_reg));
6045   match(RegD);
6046 
6047   op_cost(0);
6048   format %{ %}
6049   interface(REG_INTER);
6050 %}
6051 
6052 operand vecD()
6053 %{
6054   constraint(ALLOC_IN_RC(vectord_reg));
6055   match(VecD);
6056 
6057   op_cost(0);
6058   format %{ %}
6059   interface(REG_INTER);
6060 %}
6061 
6062 operand vecX()
6063 %{
6064   constraint(ALLOC_IN_RC(vectorx_reg));
6065   match(VecX);
6066 
6067   op_cost(0);
6068   format %{ %}
6069   interface(REG_INTER);
6070 %}
6071 
6072 operand vRegD_V0()
6073 %{
6074   constraint(ALLOC_IN_RC(v0_reg));
6075   match(RegD);
6076   op_cost(0);
6077   format %{ %}
6078   interface(REG_INTER);
6079 %}
6080 
6081 operand vRegD_V1()
6082 %{
6083   constraint(ALLOC_IN_RC(v1_reg));
6084   match(RegD);
6085   op_cost(0);
6086   format %{ %}
6087   interface(REG_INTER);
6088 %}
6089 
6090 operand vRegD_V2()
6091 %{
6092   constraint(ALLOC_IN_RC(v2_reg));
6093   match(RegD);
6094   op_cost(0);
6095   format %{ %}
6096   interface(REG_INTER);
6097 %}
6098 
6099 operand vRegD_V3()
6100 %{
6101   constraint(ALLOC_IN_RC(v3_reg));
6102   match(RegD);
6103   op_cost(0);
6104   format %{ %}
6105   interface(REG_INTER);
6106 %}
6107 
6108 // Flags register, used as output of signed compare instructions
6109 
// note that on AArch64 we also use this register as the output of
// floating point compare instructions (CmpF CmpD). this ensures
6112 // that ordered inequality tests use GT, GE, LT or LE none of which
6113 // pass through cases where the result is unordered i.e. one or both
6114 // inputs to the compare is a NaN. this means that the ideal code can
6115 // replace e.g. a GT with an LE and not end up capturing the NaN case
6116 // (where the comparison should always fail). EQ and NE tests are
6117 // always generated in ideal code so that unordered folds into the NE
6118 // case, matching the behaviour of AArch64 NE.
6119 //
6120 // This differs from x86 where the outputs of FP compares use a
6121 // special FP flags registers and where compares based on this
6122 // register are distinguished into ordered inequalities (cmpOpUCF) and
6123 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6124 // to explicitly handle the unordered case in branches. x86 also has
6125 // to include extra CMoveX rules to accept a cmpOpUCF input.
6126 
6127 operand rFlagsReg()
6128 %{
6129   constraint(ALLOC_IN_RC(int_flags));
6130   match(RegFlags);
6131 
6132   op_cost(0);
6133   format %{ "RFLAGS" %}
6134   interface(REG_INTER);
6135 %}
6136 
6137 // Flags register, used as output of unsigned compare instructions
6138 operand rFlagsRegU()
6139 %{
6140   constraint(ALLOC_IN_RC(int_flags));
6141   match(RegFlags);
6142 
6143   op_cost(0);
6144   format %{ "RFLAGSU" %}
6145   interface(REG_INTER);
6146 %}
6147 
6148 // Special Registers
6149 
6150 // Method Register
6151 operand inline_cache_RegP(iRegP reg)
6152 %{
6153   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6154   match(reg);
6155   match(iRegPNoSp);
6156   op_cost(0);
6157   format %{ %}
6158   interface(REG_INTER);
6159 %}
6160 
6161 operand interpreter_method_oop_RegP(iRegP reg)
6162 %{
6163   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6164   match(reg);
6165   match(iRegPNoSp);
6166   op_cost(0);
6167   format %{ %}
6168   interface(REG_INTER);
6169 %}
6170 
6171 // Thread Register
6172 operand thread_RegP(iRegP reg)
6173 %{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
6175   match(reg);
6176   op_cost(0);
6177   format %{ %}
6178   interface(REG_INTER);
6179 %}
6180 
6181 operand lr_RegP(iRegP reg)
6182 %{
6183   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6184   match(reg);
6185   op_cost(0);
6186   format %{ %}
6187   interface(REG_INTER);
6188 %}
6189 
6190 //----------Memory Operands----------------------------------------------------
6191 
6192 operand indirect(iRegP reg)
6193 %{
6194   constraint(ALLOC_IN_RC(ptr_reg));
6195   match(reg);
6196   op_cost(0);
6197   format %{ "[$reg]" %}
6198   interface(MEMORY_INTER) %{
6199     base($reg);
6200     index(0xffffffff);
6201     scale(0x0);
6202     disp(0x0);
6203   %}
6204 %}
6205 
6206 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6207 %{
6208   constraint(ALLOC_IN_RC(ptr_reg));
6209   match(AddP (AddP reg (LShiftL lreg scale)) off);
6210   op_cost(INSN_COST);
6211   format %{ "$reg, $lreg lsl($scale), $off" %}
6212   interface(MEMORY_INTER) %{
6213     base($reg);
6214     index($lreg);
6215     scale($scale);
6216     disp($off);
6217   %}
6218 %}
6219 
6220 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6221 %{
6222   constraint(ALLOC_IN_RC(ptr_reg));
6223   match(AddP (AddP reg (LShiftL lreg scale)) off);
6224   op_cost(INSN_COST);
6225   format %{ "$reg, $lreg lsl($scale), $off" %}
6226   interface(MEMORY_INTER) %{
6227     base($reg);
6228     index($lreg);
6229     scale($scale);
6230     disp($off);
6231   %}
6232 %}
6233 
6234 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
6235 %{
6236   constraint(ALLOC_IN_RC(ptr_reg));
6237   match(AddP (AddP reg (ConvI2L ireg)) off);
6238   op_cost(INSN_COST);
6239   format %{ "$reg, $ireg, $off I2L" %}
6240   interface(MEMORY_INTER) %{
6241     base($reg);
6242     index($ireg);
6243     scale(0x0);
6244     disp($off);
6245   %}
6246 %}
6247 
6248 operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
6249 %{
6250   constraint(ALLOC_IN_RC(ptr_reg));
6251   match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
6252   op_cost(INSN_COST);
6253   format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
6254   interface(MEMORY_INTER) %{
6255     base($reg);
6256     index($ireg);
6257     scale($scale);
6258     disp($off);
6259   %}
6260 %}
6261 
6262 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6263 %{
6264   constraint(ALLOC_IN_RC(ptr_reg));
6265   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6266   op_cost(0);
6267   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6268   interface(MEMORY_INTER) %{
6269     base($reg);
6270     index($ireg);
6271     scale($scale);
6272     disp(0x0);
6273   %}
6274 %}
6275 
6276 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6277 %{
6278   constraint(ALLOC_IN_RC(ptr_reg));
6279   match(AddP reg (LShiftL lreg scale));
6280   op_cost(0);
6281   format %{ "$reg, $lreg lsl($scale)" %}
6282   interface(MEMORY_INTER) %{
6283     base($reg);
6284     index($lreg);
6285     scale($scale);
6286     disp(0x0);
6287   %}
6288 %}
6289 
6290 operand indIndex(iRegP reg, iRegL lreg)
6291 %{
6292   constraint(ALLOC_IN_RC(ptr_reg));
6293   match(AddP reg lreg);
6294   op_cost(0);
6295   format %{ "$reg, $lreg" %}
6296   interface(MEMORY_INTER) %{
6297     base($reg);
6298     index($lreg);
6299     scale(0x0);
6300     disp(0x0);
6301   %}
6302 %}
6303 
6304 operand indOffI(iRegP reg, immIOffset off)
6305 %{
6306   constraint(ALLOC_IN_RC(ptr_reg));
6307   match(AddP reg off);
6308   op_cost(0);
6309   format %{ "[$reg, $off]" %}
6310   interface(MEMORY_INTER) %{
6311     base($reg);
6312     index(0xffffffff);
6313     scale(0x0);
6314     disp($off);
6315   %}
6316 %}
6317 
6318 operand indOffL(iRegP reg, immLoffset off)
6319 %{
6320   constraint(ALLOC_IN_RC(ptr_reg));
6321   match(AddP reg off);
6322   op_cost(0);
6323   format %{ "[$reg, $off]" %}
6324   interface(MEMORY_INTER) %{
6325     base($reg);
6326     index(0xffffffff);
6327     scale(0x0);
6328     disp($off);
6329   %}
6330 %}
6331 
6332 
6333 operand indirectN(iRegN reg)
6334 %{
6335   predicate(Universe::narrow_oop_shift() == 0);
6336   constraint(ALLOC_IN_RC(ptr_reg));
6337   match(DecodeN reg);
6338   op_cost(0);
6339   format %{ "[$reg]\t# narrow" %}
6340   interface(MEMORY_INTER) %{
6341     base($reg);
6342     index(0xffffffff);
6343     scale(0x0);
6344     disp(0x0);
6345   %}
6346 %}
6347 
6348 operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
6349 %{
6350   predicate(Universe::narrow_oop_shift() == 0);
6351   constraint(ALLOC_IN_RC(ptr_reg));
6352   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6353   op_cost(0);
6354   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6355   interface(MEMORY_INTER) %{
6356     base($reg);
6357     index($lreg);
6358     scale($scale);
6359     disp($off);
6360   %}
6361 %}
6362 
6363 operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
6364 %{
6365   predicate(Universe::narrow_oop_shift() == 0);
6366   constraint(ALLOC_IN_RC(ptr_reg));
6367   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6368   op_cost(INSN_COST);
6369   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6370   interface(MEMORY_INTER) %{
6371     base($reg);
6372     index($lreg);
6373     scale($scale);
6374     disp($off);
6375   %}
6376 %}
6377 
6378 operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
6379 %{
6380   predicate(Universe::narrow_oop_shift() == 0);
6381   constraint(ALLOC_IN_RC(ptr_reg));
6382   match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
6383   op_cost(INSN_COST);
6384   format %{ "$reg, $ireg, $off I2L\t# narrow" %}
6385   interface(MEMORY_INTER) %{
6386     base($reg);
6387     index($ireg);
6388     scale(0x0);
6389     disp($off);
6390   %}
6391 %}
6392 
6393 operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
6394 %{
6395   predicate(Universe::narrow_oop_shift() == 0);
6396   constraint(ALLOC_IN_RC(ptr_reg));
6397   match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
6398   op_cost(INSN_COST);
6399   format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
6400   interface(MEMORY_INTER) %{
6401     base($reg);
6402     index($ireg);
6403     scale($scale);
6404     disp($off);
6405   %}
6406 %}
6407 
6408 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
6409 %{
6410   predicate(Universe::narrow_oop_shift() == 0);
6411   constraint(ALLOC_IN_RC(ptr_reg));
6412   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
6413   op_cost(0);
6414   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
6415   interface(MEMORY_INTER) %{
6416     base($reg);
6417     index($ireg);
6418     scale($scale);
6419     disp(0x0);
6420   %}
6421 %}
6422 
6423 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
6424 %{
6425   predicate(Universe::narrow_oop_shift() == 0);
6426   constraint(ALLOC_IN_RC(ptr_reg));
6427   match(AddP (DecodeN reg) (LShiftL lreg scale));
6428   op_cost(0);
6429   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
6430   interface(MEMORY_INTER) %{
6431     base($reg);
6432     index($lreg);
6433     scale($scale);
6434     disp(0x0);
6435   %}
6436 %}
6437 
6438 operand indIndexN(iRegN reg, iRegL lreg)
6439 %{
6440   predicate(Universe::narrow_oop_shift() == 0);
6441   constraint(ALLOC_IN_RC(ptr_reg));
6442   match(AddP (DecodeN reg) lreg);
6443   op_cost(0);
6444   format %{ "$reg, $lreg\t# narrow" %}
6445   interface(MEMORY_INTER) %{
6446     base($reg);
6447     index($lreg);
6448     scale(0x0);
6449     disp(0x0);
6450   %}
6451 %}
6452 
6453 operand indOffIN(iRegN reg, immIOffset off)
6454 %{
6455   predicate(Universe::narrow_oop_shift() == 0);
6456   constraint(ALLOC_IN_RC(ptr_reg));
6457   match(AddP (DecodeN reg) off);
6458   op_cost(0);
6459   format %{ "[$reg, $off]\t# narrow" %}
6460   interface(MEMORY_INTER) %{
6461     base($reg);
6462     index(0xffffffff);
6463     scale(0x0);
6464     disp($off);
6465   %}
6466 %}
6467 
6468 operand indOffLN(iRegN reg, immLoffset off)
6469 %{
6470   predicate(Universe::narrow_oop_shift() == 0);
6471   constraint(ALLOC_IN_RC(ptr_reg));
6472   match(AddP (DecodeN reg) off);
6473   op_cost(0);
6474   format %{ "[$reg, $off]\t# narrow" %}
6475   interface(MEMORY_INTER) %{
6476     base($reg);
6477     index(0xffffffff);
6478     scale(0x0);
6479     disp($off);
6480   %}
6481 %}
6482 
6483 
6484 
6485 // AArch64 opto stubs need to write to the pc slot in the thread anchor
6486 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
6487 %{
6488   constraint(ALLOC_IN_RC(ptr_reg));
6489   match(AddP reg off);
6490   op_cost(0);
6491   format %{ "[$reg, $off]" %}
6492   interface(MEMORY_INTER) %{
6493     base($reg);
6494     index(0xffffffff);
6495     scale(0x0);
6496     disp($off);
6497   %}
6498 %}
6499 
6500 //----------Special Memory Operands--------------------------------------------
6501 // Stack Slot Operand - This operand is used for loading and storing temporary
6502 //                      values on the stack where a match requires a value to
6503 //                      flow through memory.
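// For example, when the register allocator spills a value it is written
// to and later reloaded from a slot addressed off the stack pointer; in
// the operands below, disp($reg) supplies that slot's offset.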
6504 operand stackSlotP(sRegP reg)
6505 %{
6506   constraint(ALLOC_IN_RC(stack_slots));
6507   op_cost(100);
6508   // No match rule because this operand is only generated in matching
6509   // match(RegP);
6510   format %{ "[$reg]" %}
6511   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6513     index(0x0);  // No Index
6514     scale(0x0);  // No Scale
6515     disp($reg);  // Stack Offset
6516   %}
6517 %}
6518 
6519 operand stackSlotI(sRegI reg)
6520 %{
6521   constraint(ALLOC_IN_RC(stack_slots));
6522   // No match rule because this operand is only generated in matching
6523   // match(RegI);
6524   format %{ "[$reg]" %}
6525   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6527     index(0x0);  // No Index
6528     scale(0x0);  // No Scale
6529     disp($reg);  // Stack Offset
6530   %}
6531 %}
6532 
6533 operand stackSlotF(sRegF reg)
6534 %{
6535   constraint(ALLOC_IN_RC(stack_slots));
6536   // No match rule because this operand is only generated in matching
6537   // match(RegF);
6538   format %{ "[$reg]" %}
6539   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6541     index(0x0);  // No Index
6542     scale(0x0);  // No Scale
6543     disp($reg);  // Stack Offset
6544   %}
6545 %}
6546 
6547 operand stackSlotD(sRegD reg)
6548 %{
6549   constraint(ALLOC_IN_RC(stack_slots));
6550   // No match rule because this operand is only generated in matching
6551   // match(RegD);
6552   format %{ "[$reg]" %}
6553   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6555     index(0x0);  // No Index
6556     scale(0x0);  // No Scale
6557     disp($reg);  // Stack Offset
6558   %}
6559 %}
6560 
6561 operand stackSlotL(sRegL reg)
6562 %{
6563   constraint(ALLOC_IN_RC(stack_slots));
6564   // No match rule because this operand is only generated in matching
6565   // match(RegL);
6566   format %{ "[$reg]" %}
6567   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6569     index(0x0);  // No Index
6570     scale(0x0);  // No Scale
6571     disp($reg);  // Stack Offset
6572   %}
6573 %}
6574 
6575 // Operands for expressing Control Flow
6576 // NOTE: Label is a predefined operand which should not be redefined in
6577 //       the AD file. It is generically handled within the ADLC.
6578 
6579 //----------Conditional Branch Operands----------------------------------------
6580 // Comparison Op  - This is the operation of the comparison, and is limited to
6581 //                  the following set of codes:
6582 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6583 //
6584 // Other attributes of the comparison, such as unsignedness, are specified
6585 // by the comparison instruction that sets a condition code flags register.
6586 // That result is represented by a flags operand whose subtype is appropriate
6587 // to the unsignedness (etc.) of the comparison.
6588 //
6589 // Later, the instruction which matches both the Comparison Op (a Bool) and
6590 // the flags (produced by the Cmp) specifies the coding of the comparison op
6591 // by matching a specific subtype of Bool operand below, such as cmpOpU.
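//
// As an illustrative sketch (not a rule defined at this point in the
// file): a signed Java comparison such as "if (a < b)" produces a CmpI
// node, matched by a compare rule that sets an rFlagsReg, and a Bool
// node, matched by the cmpOp operand below with the "lt" encoding; the
// conditional branch rule then consumes both and emits a "b.lt" to the
// branch target.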
6592 
6593 // used for signed integral comparisons and fp comparisons
6594 
6595 operand cmpOp()
6596 %{
6597   match(Bool);
6598 
6599   format %{ "" %}
6600   interface(COND_INTER) %{
6601     equal(0x0, "eq");
6602     not_equal(0x1, "ne");
6603     less(0xb, "lt");
6604     greater_equal(0xa, "ge");
6605     less_equal(0xd, "le");
6606     greater(0xc, "gt");
6607     overflow(0x6, "vs");
6608     no_overflow(0x7, "vc");
6609   %}
6610 %}
6611 
6612 // used for unsigned integral comparisons
6613 
6614 operand cmpOpU()
6615 %{
6616   match(Bool);
6617 
6618   format %{ "" %}
6619   interface(COND_INTER) %{
6620     equal(0x0, "eq");
6621     not_equal(0x1, "ne");
6622     less(0x3, "lo");
6623     greater_equal(0x2, "hs");
6624     less_equal(0x9, "ls");
6625     greater(0x8, "hi");
6626     overflow(0x6, "vs");
6627     no_overflow(0x7, "vc");
6628   %}
6629 %}
6630 
6631 // Special operand allowing long args to int ops to be truncated for free
6632 
6633 operand iRegL2I(iRegL reg) %{
6634 
6635   op_cost(0);
6636 
6637   match(ConvL2I reg);
6638 
6639   format %{ "l2i($reg)" %}
6640 
6641   interface(REG_INTER)
6642 %}
6643 
6644 opclass vmem(indirect, indIndex, indOffI, indOffL);
6645 
6646 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.
6652 
// memory is used to define the read/write location for load/store
// instruction defs. We can turn a memory op into an Address.
6655 
6656 opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
6657                indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
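
// As an illustrative note: because the load/store rules below take a
// "memory" operand, a single rule such as loadI covers ldrw with every
// addressing mode listed above, e.g. [Rbase], [Rbase, #imm] and
// [Rbase, Rindex, lsl #scale], without a separate instruct per mode.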
6658 
6659 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. It allows the src to be either an iRegI or a (ConvL2I
// iRegL). In the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. If the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes, so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant, but it's not too costly.
6672 
6673 opclass iRegIorL2I(iRegI, iRegL2I);
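
// As an illustrative example: for Java code such as
//   int f(long l) { return ((int) l) + 1; }
// the matcher can bind the (ConvL2I l) subtree to an iRegL2I operand,
// so the 32-bit add reads the low half of the long's register directly
// and no separate l2i (movw) needs to be planted.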
6674 
6675 //----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
6678 pipeline %{
6679 
6680 attributes %{
6681   // ARM instructions are of fixed length
  fixed_size_instructions;           // Fixed size instructions
6683   max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
6684   // ARM instructions come in 32-bit word units
6685   instruction_unit_size = 4;         // An instruction is 4 bytes long
6686   instruction_fetch_unit_size = 64;  // The processor fetches one line
6687   instruction_fetch_units = 1;       // of 64 bytes
6688 
6689   // List of nop instructions
6690   nops( MachNop );
6691 %}
6692 
// We don't use an actual pipeline model, so we don't care about
// resources or descriptions. We do use pipeline classes to introduce
// fixed latencies.
6696 
6697 //----------RESOURCES----------------------------------------------------------
6698 // Resources are the functional units available to the machine
6699 
6700 resources( INS0, INS1, INS01 = INS0 | INS1,
6701            ALU0, ALU1, ALU = ALU0 | ALU1,
6702            MAC,
6703            DIV,
6704            BRANCH,
6705            LDST,
6706            NEON_FP);
6707 
6708 //----------PIPELINE DESCRIPTION-----------------------------------------------
6709 // Pipeline Description specifies the stages in the machine's pipeline
6710 
6711 pipe_desc(ISS, EX1, EX2, WR);
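
// On the natural reading of these stage names: ISS is the issue stage,
// EX1 and EX2 are the execute stages, and WR is the result write stage.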
6712 
6713 //----------PIPELINE CLASSES---------------------------------------------------
6714 // Pipeline Classes describe the stages in which input and output are
6715 // referenced by the hardware pipeline.
6716 
6717 //------- Integer ALU operations --------------------------
6718 
6719 // Integer ALU reg-reg operation
6720 // Operands needed in EX1, result generated in EX2
6721 // Eg.  ADD     x0, x1, x2
6722 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6723 %{
6724   single_instruction;
6725   dst    : EX2(write);
6726   src1   : EX1(read);
6727   src2   : EX1(read);
6728   INS01  : ISS; // Dual issue as instruction 0 or 1
6729   ALU    : EX2;
6730 %}
6731 
6732 // Integer ALU reg-reg operation with constant shift
6733 // Shifted register must be available in LATE_ISS instead of EX1
6734 // Eg.  ADD     x0, x1, x2, LSL #2
6735 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6736 %{
6737   single_instruction;
6738   dst    : EX2(write);
6739   src1   : EX1(read);
6740   src2   : ISS(read);
6741   INS01  : ISS;
6742   ALU    : EX2;
6743 %}
6744 
6745 // Integer ALU reg operation with constant shift
6746 // Eg.  LSL     x0, x1, #shift
6747 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6748 %{
6749   single_instruction;
6750   dst    : EX2(write);
6751   src1   : ISS(read);
6752   INS01  : ISS;
6753   ALU    : EX2;
6754 %}
6755 
6756 // Integer ALU reg-reg operation with variable shift
6757 // Both operands must be available in LATE_ISS instead of EX1
6758 // Result is available in EX1 instead of EX2
6759 // Eg.  LSLV    x0, x1, x2
6760 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6761 %{
6762   single_instruction;
6763   dst    : EX1(write);
6764   src1   : ISS(read);
6765   src2   : ISS(read);
6766   INS01  : ISS;
6767   ALU    : EX1;
6768 %}
6769 
6770 // Integer ALU reg-reg operation with extract
6771 // As for _vshift above, but result generated in EX2
6772 // Eg.  EXTR    x0, x1, x2, #N
6773 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
6774 %{
6775   single_instruction;
6776   dst    : EX2(write);
6777   src1   : ISS(read);
6778   src2   : ISS(read);
6779   INS1   : ISS; // Can only dual issue as Instruction 1
6780   ALU    : EX1;
6781 %}
6782 
6783 // Integer ALU reg operation
6784 // Eg.  NEG     x0, x1
6785 pipe_class ialu_reg(iRegI dst, iRegI src)
6786 %{
6787   single_instruction;
6788   dst    : EX2(write);
6789   src    : EX1(read);
6790   INS01  : ISS;
6791   ALU    : EX2;
6792 %}
6793 
// Integer ALU reg-immediate operation
6795 // Eg.  ADD     x0, x1, #N
6796 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6797 %{
6798   single_instruction;
6799   dst    : EX2(write);
6800   src1   : EX1(read);
6801   INS01  : ISS;
6802   ALU    : EX2;
6803 %}
6804 
6805 // Integer ALU immediate operation (no source operands)
6806 // Eg.  MOV     x0, #N
6807 pipe_class ialu_imm(iRegI dst)
6808 %{
6809   single_instruction;
6810   dst    : EX1(write);
6811   INS01  : ISS;
6812   ALU    : EX1;
6813 %}
6814 
6815 //------- Compare operation -------------------------------
6816 
6817 // Compare reg-reg
6818 // Eg.  CMP     x0, x1
6819 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6820 %{
6821   single_instruction;
6822 //  fixed_latency(16);
6823   cr     : EX2(write);
6824   op1    : EX1(read);
6825   op2    : EX1(read);
6826   INS01  : ISS;
6827   ALU    : EX2;
6828 %}
6829 
// Compare reg-immediate
6831 // Eg.  CMP     x0, #N
6832 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6833 %{
6834   single_instruction;
6835 //  fixed_latency(16);
6836   cr     : EX2(write);
6837   op1    : EX1(read);
6838   INS01  : ISS;
6839   ALU    : EX2;
6840 %}
6841 
6842 //------- Conditional instructions ------------------------
6843 
6844 // Conditional no operands
6845 // Eg.  CSINC   x0, zr, zr, <cond>
6846 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6847 %{
6848   single_instruction;
6849   cr     : EX1(read);
6850   dst    : EX2(write);
6851   INS01  : ISS;
6852   ALU    : EX2;
6853 %}
6854 
6855 // Conditional 2 operand
// Eg.  CSEL    x0, x1, x2, <cond>
6857 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6858 %{
6859   single_instruction;
6860   cr     : EX1(read);
6861   src1   : EX1(read);
6862   src2   : EX1(read);
6863   dst    : EX2(write);
6864   INS01  : ISS;
6865   ALU    : EX2;
6866 %}
6867 
// Conditional 1 operand
// Eg.  CSEL    x0, x1, zr, <cond>
6870 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6871 %{
6872   single_instruction;
6873   cr     : EX1(read);
6874   src    : EX1(read);
6875   dst    : EX2(write);
6876   INS01  : ISS;
6877   ALU    : EX2;
6878 %}
6879 
6880 //------- Multiply pipeline operations --------------------
6881 
6882 // Multiply reg-reg
6883 // Eg.  MUL     w0, w1, w2
6884 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6885 %{
6886   single_instruction;
6887   dst    : WR(write);
6888   src1   : ISS(read);
6889   src2   : ISS(read);
6890   INS01  : ISS;
6891   MAC    : WR;
6892 %}
6893 
6894 // Multiply accumulate
6895 // Eg.  MADD    w0, w1, w2, w3
6896 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6897 %{
6898   single_instruction;
6899   dst    : WR(write);
6900   src1   : ISS(read);
6901   src2   : ISS(read);
6902   src3   : ISS(read);
6903   INS01  : ISS;
6904   MAC    : WR;
6905 %}
6906 
// Multiply reg-reg (long)
// Eg.  MUL     x0, x1, x2
6908 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6909 %{
6910   single_instruction;
6911   fixed_latency(3); // Maximum latency for 64 bit mul
6912   dst    : WR(write);
6913   src1   : ISS(read);
6914   src2   : ISS(read);
6915   INS01  : ISS;
6916   MAC    : WR;
6917 %}
6918 
// Multiply accumulate (long)
// Eg.  MADD    x0, x1, x2, x3
6921 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6922 %{
6923   single_instruction;
6924   fixed_latency(3); // Maximum latency for 64 bit mul
6925   dst    : WR(write);
6926   src1   : ISS(read);
6927   src2   : ISS(read);
6928   src3   : ISS(read);
6929   INS01  : ISS;
6930   MAC    : WR;
6931 %}
6932 
6933 //------- Divide pipeline operations --------------------
6934 
6935 // Eg.  SDIV    w0, w1, w2
6936 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6937 %{
6938   single_instruction;
6939   fixed_latency(8); // Maximum latency for 32 bit divide
6940   dst    : WR(write);
6941   src1   : ISS(read);
6942   src2   : ISS(read);
6943   INS0   : ISS; // Can only dual issue as instruction 0
6944   DIV    : WR;
6945 %}
6946 
6947 // Eg.  SDIV    x0, x1, x2
6948 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6949 %{
6950   single_instruction;
6951   fixed_latency(16); // Maximum latency for 64 bit divide
6952   dst    : WR(write);
6953   src1   : ISS(read);
6954   src2   : ISS(read);
6955   INS0   : ISS; // Can only dual issue as instruction 0
6956   DIV    : WR;
6957 %}
6958 
6959 //------- Load pipeline operations ------------------------
6960 
6961 // Load - prefetch
// Eg.  PRFM    <mem>
6963 pipe_class iload_prefetch(memory mem)
6964 %{
6965   single_instruction;
6966   mem    : ISS(read);
6967   INS01  : ISS;
6968   LDST   : WR;
6969 %}
6970 
6971 // Load - reg, mem
6972 // Eg.  LDR     x0, <mem>
6973 pipe_class iload_reg_mem(iRegI dst, memory mem)
6974 %{
6975   single_instruction;
6976   dst    : WR(write);
6977   mem    : ISS(read);
6978   INS01  : ISS;
6979   LDST   : WR;
6980 %}
6981 
6982 // Load - reg, reg
6983 // Eg.  LDR     x0, [sp, x1]
6984 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6985 %{
6986   single_instruction;
6987   dst    : WR(write);
6988   src    : ISS(read);
6989   INS01  : ISS;
6990   LDST   : WR;
6991 %}
6992 
6993 //------- Store pipeline operations -----------------------
6994 
6995 // Store - zr, mem
6996 // Eg.  STR     zr, <mem>
6997 pipe_class istore_mem(memory mem)
6998 %{
6999   single_instruction;
7000   mem    : ISS(read);
7001   INS01  : ISS;
7002   LDST   : WR;
7003 %}
7004 
7005 // Store - reg, mem
7006 // Eg.  STR     x0, <mem>
7007 pipe_class istore_reg_mem(iRegI src, memory mem)
7008 %{
7009   single_instruction;
7010   mem    : ISS(read);
7011   src    : EX2(read);
7012   INS01  : ISS;
7013   LDST   : WR;
7014 %}
7015 
7016 // Store - reg, reg
7017 // Eg. STR      x0, [sp, x1]
7018 pipe_class istore_reg_reg(iRegI dst, iRegI src)
7019 %{
7020   single_instruction;
7021   dst    : ISS(read);
7022   src    : EX2(read);
7023   INS01  : ISS;
7024   LDST   : WR;
7025 %}
7026 
//------- Branch pipeline operations ----------------------
7028 
7029 // Branch
7030 pipe_class pipe_branch()
7031 %{
7032   single_instruction;
7033   INS01  : ISS;
7034   BRANCH : EX1;
7035 %}
7036 
7037 // Conditional branch
7038 pipe_class pipe_branch_cond(rFlagsReg cr)
7039 %{
7040   single_instruction;
7041   cr     : EX1(read);
7042   INS01  : ISS;
7043   BRANCH : EX1;
7044 %}
7045 
7046 // Compare & Branch
// Eg.  CBZ/CBNZ
7048 pipe_class pipe_cmp_branch(iRegI op1)
7049 %{
7050   single_instruction;
7051   op1    : EX1(read);
7052   INS01  : ISS;
7053   BRANCH : EX1;
7054 %}
7055 
7056 //------- Synchronisation operations ----------------------
7057 
7058 // Any operation requiring serialization.
// Eg.  DMB / atomic ops / load-acquire / store-release
7060 pipe_class pipe_serial()
7061 %{
7062   single_instruction;
7063   force_serialization;
7064   fixed_latency(16);
7065   INS01  : ISS(2); // Cannot dual issue with any other instruction
7066   LDST   : WR;
7067 %}
7068 
7069 // Generic big/slow expanded idiom - also serialized
7070 pipe_class pipe_slow()
7071 %{
7072   instruction_count(10);
7073   multiple_bundles;
7074   force_serialization;
7075   fixed_latency(16);
7076   INS01  : ISS(2); // Cannot dual issue with any other instruction
7077   LDST   : WR;
7078 %}
7079 
7080 // Empty pipeline class
7081 pipe_class pipe_class_empty()
7082 %{
7083   single_instruction;
7084   fixed_latency(0);
7085 %}
7086 
7087 // Default pipeline class.
7088 pipe_class pipe_class_default()
7089 %{
7090   single_instruction;
7091   fixed_latency(2);
7092 %}
7093 
7094 // Pipeline class for compares.
7095 pipe_class pipe_class_compare()
7096 %{
7097   single_instruction;
7098   fixed_latency(16);
7099 %}
7100 
7101 // Pipeline class for memory operations.
7102 pipe_class pipe_class_memory()
7103 %{
7104   single_instruction;
7105   fixed_latency(16);
7106 %}
7107 
7108 // Pipeline class for call.
7109 pipe_class pipe_class_call()
7110 %{
7111   single_instruction;
7112   fixed_latency(100);
7113 %}
7114 
7115 // Define the class for the Nop node.
7116 define %{
7117    MachNop = pipe_class_empty;
7118 %}
7119 
7120 %}
7121 //----------INSTRUCTIONS-------------------------------------------------------
7122 //
7123 // match      -- States which machine-independent subtree may be replaced
7124 //               by this instruction.
7125 // ins_cost   -- The estimated cost of this instruction is used by instruction
7126 //               selection to identify a minimum cost tree of machine
7127 //               instructions that matches a tree of machine-independent
7128 //               instructions.
7129 // format     -- A string providing the disassembly for this instruction.
7130 //               The value of an instruction's operand may be inserted
7131 //               by referring to it with a '$' prefix.
7132 // opcode     -- Three instruction opcodes may be provided.  These are referred
7133 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7135 //               indicate the type of machine instruction, while secondary
7136 //               and tertiary are often used for prefix options or addressing
7137 //               modes.
7138 // ins_encode -- A list of encode classes with parameters. The encode class
7139 //               name must have been defined in an 'enc_class' specification
7140 //               in the encode section of the architecture description.
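//
// As an illustrative skeleton only (this mirrors, but is not, a rule
// defined at this point in the file):
//
//   instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
//     match(Set dst (AddI src1 src2));
//     ins_cost(INSN_COST);
//     format %{ "addw  $dst, $src1, $src2" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg),
//               as_Register($src1$$reg),
//               as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}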
7141 
7142 // ============================================================================
7143 // Memory (Load/Store) Instructions
7144 
7145 // Load Instructions
7146 
7147 // Load Byte (8 bit signed)
7148 instruct loadB(iRegINoSp dst, memory mem)
7149 %{
7150   match(Set dst (LoadB mem));
7151   predicate(!needs_acquiring_load(n));
7152 
7153   ins_cost(4 * INSN_COST);
7154   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7155 
7156   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7157 
7158   ins_pipe(iload_reg_mem);
7159 %}
7160 
7161 // Load Byte (8 bit signed) into long
7162 instruct loadB2L(iRegLNoSp dst, memory mem)
7163 %{
7164   match(Set dst (ConvI2L (LoadB mem)));
7165   predicate(!needs_acquiring_load(n->in(1)));
7166 
7167   ins_cost(4 * INSN_COST);
7168   format %{ "ldrsb  $dst, $mem\t# byte" %}
7169 
7170   ins_encode(aarch64_enc_ldrsb(dst, mem));
7171 
7172   ins_pipe(iload_reg_mem);
7173 %}
7174 
7175 // Load Byte (8 bit unsigned)
7176 instruct loadUB(iRegINoSp dst, memory mem)
7177 %{
7178   match(Set dst (LoadUB mem));
7179   predicate(!needs_acquiring_load(n));
7180 
7181   ins_cost(4 * INSN_COST);
7182   format %{ "ldrbw  $dst, $mem\t# byte" %}
7183 
7184   ins_encode(aarch64_enc_ldrb(dst, mem));
7185 
7186   ins_pipe(iload_reg_mem);
7187 %}
7188 
7189 // Load Byte (8 bit unsigned) into long
7190 instruct loadUB2L(iRegLNoSp dst, memory mem)
7191 %{
7192   match(Set dst (ConvI2L (LoadUB mem)));
7193   predicate(!needs_acquiring_load(n->in(1)));
7194 
7195   ins_cost(4 * INSN_COST);
7196   format %{ "ldrb  $dst, $mem\t# byte" %}
7197 
7198   ins_encode(aarch64_enc_ldrb(dst, mem));
7199 
7200   ins_pipe(iload_reg_mem);
7201 %}
7202 
7203 // Load Short (16 bit signed)
7204 instruct loadS(iRegINoSp dst, memory mem)
7205 %{
7206   match(Set dst (LoadS mem));
7207   predicate(!needs_acquiring_load(n));
7208 
7209   ins_cost(4 * INSN_COST);
7210   format %{ "ldrshw  $dst, $mem\t# short" %}
7211 
7212   ins_encode(aarch64_enc_ldrshw(dst, mem));
7213 
7214   ins_pipe(iload_reg_mem);
7215 %}
7216 
7217 // Load Short (16 bit signed) into long
7218 instruct loadS2L(iRegLNoSp dst, memory mem)
7219 %{
7220   match(Set dst (ConvI2L (LoadS mem)));
7221   predicate(!needs_acquiring_load(n->in(1)));
7222 
7223   ins_cost(4 * INSN_COST);
7224   format %{ "ldrsh  $dst, $mem\t# short" %}
7225 
7226   ins_encode(aarch64_enc_ldrsh(dst, mem));
7227 
7228   ins_pipe(iload_reg_mem);
7229 %}
7230 
7231 // Load Char (16 bit unsigned)
7232 instruct loadUS(iRegINoSp dst, memory mem)
7233 %{
7234   match(Set dst (LoadUS mem));
7235   predicate(!needs_acquiring_load(n));
7236 
7237   ins_cost(4 * INSN_COST);
7238   format %{ "ldrh  $dst, $mem\t# short" %}
7239 
7240   ins_encode(aarch64_enc_ldrh(dst, mem));
7241 
7242   ins_pipe(iload_reg_mem);
7243 %}
7244 
7245 // Load Short/Char (16 bit unsigned) into long
7246 instruct loadUS2L(iRegLNoSp dst, memory mem)
7247 %{
7248   match(Set dst (ConvI2L (LoadUS mem)));
7249   predicate(!needs_acquiring_load(n->in(1)));
7250 
7251   ins_cost(4 * INSN_COST);
7252   format %{ "ldrh  $dst, $mem\t# short" %}
7253 
7254   ins_encode(aarch64_enc_ldrh(dst, mem));
7255 
7256   ins_pipe(iload_reg_mem);
7257 %}
7258 
7259 // Load Integer (32 bit signed)
7260 instruct loadI(iRegINoSp dst, memory mem)
7261 %{
7262   match(Set dst (LoadI mem));
7263   predicate(!needs_acquiring_load(n));
7264 
7265   ins_cost(4 * INSN_COST);
7266   format %{ "ldrw  $dst, $mem\t# int" %}
7267 
7268   ins_encode(aarch64_enc_ldrw(dst, mem));
7269 
7270   ins_pipe(iload_reg_mem);
7271 %}
7272 
7273 // Load Integer (32 bit signed) into long
7274 instruct loadI2L(iRegLNoSp dst, memory mem)
7275 %{
7276   match(Set dst (ConvI2L (LoadI mem)));
7277   predicate(!needs_acquiring_load(n->in(1)));
7278 
7279   ins_cost(4 * INSN_COST);
7280   format %{ "ldrsw  $dst, $mem\t# int" %}
7281 
7282   ins_encode(aarch64_enc_ldrsw(dst, mem));
7283 
7284   ins_pipe(iload_reg_mem);
7285 %}
7286 
7287 // Load Integer (32 bit unsigned) into long
7288 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
7289 %{
7290   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7291   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7292 
7293   ins_cost(4 * INSN_COST);
7294   format %{ "ldrw  $dst, $mem\t# int" %}
7295 
7296   ins_encode(aarch64_enc_ldrw(dst, mem));
7297 
7298   ins_pipe(iload_reg_mem);
7299 %}
7300 
7301 // Load Long (64 bit signed)
7302 instruct loadL(iRegLNoSp dst, memory mem)
7303 %{
7304   match(Set dst (LoadL mem));
7305   predicate(!needs_acquiring_load(n));
7306 
7307   ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# long" %}
7309 
7310   ins_encode(aarch64_enc_ldr(dst, mem));
7311 
7312   ins_pipe(iload_reg_mem);
7313 %}
7314 
7315 // Load Range
7316 instruct loadRange(iRegINoSp dst, memory mem)
7317 %{
7318   match(Set dst (LoadRange mem));
7319 
7320   ins_cost(4 * INSN_COST);
7321   format %{ "ldrw  $dst, $mem\t# range" %}
7322 
7323   ins_encode(aarch64_enc_ldrw(dst, mem));
7324 
7325   ins_pipe(iload_reg_mem);
7326 %}
7327 
7328 // Load Pointer
7329 instruct loadP(iRegPNoSp dst, memory mem)
7330 %{
7331   match(Set dst (LoadP mem));
7332   predicate(!needs_acquiring_load(n));
7333 
7334   ins_cost(4 * INSN_COST);
7335   format %{ "ldr  $dst, $mem\t# ptr" %}
7336 
7337   ins_encode(aarch64_enc_ldr(dst, mem));
7338 
7339   ins_pipe(iload_reg_mem);
7340 %}
7341 
7342 // Load Compressed Pointer
7343 instruct loadN(iRegNNoSp dst, memory mem)
7344 %{
7345   match(Set dst (LoadN mem));
7346   predicate(!needs_acquiring_load(n));
7347 
7348   ins_cost(4 * INSN_COST);
7349   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7350 
7351   ins_encode(aarch64_enc_ldrw(dst, mem));
7352 
7353   ins_pipe(iload_reg_mem);
7354 %}
7355 
7356 // Load Klass Pointer
7357 instruct loadKlass(iRegPNoSp dst, memory mem)
7358 %{
7359   match(Set dst (LoadKlass mem));
7360   predicate(!needs_acquiring_load(n));
7361 
7362   ins_cost(4 * INSN_COST);
7363   format %{ "ldr  $dst, $mem\t# class" %}
7364 
7365   ins_encode(aarch64_enc_ldr(dst, mem));
7366 
7367   ins_pipe(iload_reg_mem);
7368 %}
7369 
7370 // Load Narrow Klass Pointer
7371 instruct loadNKlass(iRegNNoSp dst, memory mem)
7372 %{
7373   match(Set dst (LoadNKlass mem));
7374   predicate(!needs_acquiring_load(n));
7375 
7376   ins_cost(4 * INSN_COST);
7377   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7378 
7379   ins_encode(aarch64_enc_ldrw(dst, mem));
7380 
7381   ins_pipe(iload_reg_mem);
7382 %}
7383 
7384 // Load Float
7385 instruct loadF(vRegF dst, memory mem)
7386 %{
7387   match(Set dst (LoadF mem));
7388   predicate(!needs_acquiring_load(n));
7389 
7390   ins_cost(4 * INSN_COST);
7391   format %{ "ldrs  $dst, $mem\t# float" %}
7392 
7393   ins_encode( aarch64_enc_ldrs(dst, mem) );
7394 
7395   ins_pipe(pipe_class_memory);
7396 %}
7397 
7398 // Load Double
7399 instruct loadD(vRegD dst, memory mem)
7400 %{
7401   match(Set dst (LoadD mem));
7402   predicate(!needs_acquiring_load(n));
7403 
7404   ins_cost(4 * INSN_COST);
7405   format %{ "ldrd  $dst, $mem\t# double" %}
7406 
7407   ins_encode( aarch64_enc_ldrd(dst, mem) );
7408 
7409   ins_pipe(pipe_class_memory);
7410 %}
7411 
7412 
7413 // Load Int Constant
7414 instruct loadConI(iRegINoSp dst, immI src)
7415 %{
7416   match(Set dst src);
7417 
7418   ins_cost(INSN_COST);
7419   format %{ "mov $dst, $src\t# int" %}
7420 
7421   ins_encode( aarch64_enc_movw_imm(dst, src) );
7422 
7423   ins_pipe(ialu_imm);
7424 %}
7425 
7426 // Load Long Constant
7427 instruct loadConL(iRegLNoSp dst, immL src)
7428 %{
7429   match(Set dst src);
7430 
7431   ins_cost(INSN_COST);
7432   format %{ "mov $dst, $src\t# long" %}
7433 
7434   ins_encode( aarch64_enc_mov_imm(dst, src) );
7435 
7436   ins_pipe(ialu_imm);
7437 %}
7438 
7439 // Load Pointer Constant
7440 
7441 instruct loadConP(iRegPNoSp dst, immP con)
7442 %{
7443   match(Set dst con);
7444 
7445   ins_cost(INSN_COST * 4);
7446   format %{
7447     "mov  $dst, $con\t# ptr\n\t"
7448   %}
7449 
7450   ins_encode(aarch64_enc_mov_p(dst, con));
7451 
7452   ins_pipe(ialu_imm);
7453 %}
7454 
7455 // Load Null Pointer Constant
7456 
7457 instruct loadConP0(iRegPNoSp dst, immP0 con)
7458 %{
7459   match(Set dst con);
7460 
7461   ins_cost(INSN_COST);
7462   format %{ "mov  $dst, $con\t# NULL ptr" %}
7463 
7464   ins_encode(aarch64_enc_mov_p0(dst, con));
7465 
7466   ins_pipe(ialu_imm);
7467 %}
7468 
7469 // Load Pointer Constant One
7470 
7471 instruct loadConP1(iRegPNoSp dst, immP_1 con)
7472 %{
7473   match(Set dst con);
7474 
7475   ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 0x1" %}
7477 
7478   ins_encode(aarch64_enc_mov_p1(dst, con));
7479 
7480   ins_pipe(ialu_imm);
7481 %}
7482 
7483 // Load Poll Page Constant
7484 
7485 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
7486 %{
7487   match(Set dst con);
7488 
7489   ins_cost(INSN_COST);
7490   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
7491 
7492   ins_encode(aarch64_enc_mov_poll_page(dst, con));
7493 
7494   ins_pipe(ialu_imm);
7495 %}
7496 
7497 // Load Byte Map Base Constant
7498 
7499 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7500 %{
7501   match(Set dst con);
7502 
7503   ins_cost(INSN_COST);
7504   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7505 
7506   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7507 
7508   ins_pipe(ialu_imm);
7509 %}
7510 
7511 // Load Narrow Pointer Constant
7512 
7513 instruct loadConN(iRegNNoSp dst, immN con)
7514 %{
7515   match(Set dst con);
7516 
7517   ins_cost(INSN_COST * 4);
7518   format %{ "mov  $dst, $con\t# compressed ptr" %}
7519 
7520   ins_encode(aarch64_enc_mov_n(dst, con));
7521 
7522   ins_pipe(ialu_imm);
7523 %}
7524 
7525 // Load Narrow Null Pointer Constant
7526 
7527 instruct loadConN0(iRegNNoSp dst, immN0 con)
7528 %{
7529   match(Set dst con);
7530 
7531   ins_cost(INSN_COST);
7532   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7533 
7534   ins_encode(aarch64_enc_mov_n0(dst, con));
7535 
7536   ins_pipe(ialu_imm);
7537 %}
7538 
7539 // Load Narrow Klass Constant
7540 
7541 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7542 %{
7543   match(Set dst con);
7544 
7545   ins_cost(INSN_COST);
7546   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7547 
7548   ins_encode(aarch64_enc_mov_nk(dst, con));
7549 
7550   ins_pipe(ialu_imm);
7551 %}
7552 
7553 // Load Packed Float Constant
7554 
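// Assuming the usual meaning of immFPacked: it accepts only floats that
// are directly encodable as an FMOV immediate, so no constant-table
// load is needed; other float constants fall through to loadConF below.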
7555 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7556   match(Set dst con);
7557   ins_cost(INSN_COST * 4);
7558   format %{ "fmovs  $dst, $con"%}
7559   ins_encode %{
7560     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7561   %}
7562 
7563   ins_pipe(pipe_class_default);
7564 %}
7565 
7566 // Load Float Constant
7567 
7568 instruct loadConF(vRegF dst, immF con) %{
7569   match(Set dst con);
7570 
7571   ins_cost(INSN_COST * 4);
7572 
7573   format %{
7574     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7575   %}
7576 
7577   ins_encode %{
7578     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7579   %}
7580 
7581   ins_pipe(pipe_class_default);
7582 %}
7583 
7584 // Load Packed Double Constant
7585 
7586 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7587   match(Set dst con);
7588   ins_cost(INSN_COST);
7589   format %{ "fmovd  $dst, $con"%}
7590   ins_encode %{
7591     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7592   %}
7593 
7594   ins_pipe(pipe_class_default);
7595 %}
7596 
7597 // Load Double Constant
7598 
7599 instruct loadConD(vRegD dst, immD con) %{
7600   match(Set dst con);
7601 
7602   ins_cost(INSN_COST * 5);
7603   format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
7605   %}
7606 
7607   ins_encode %{
7608     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
7609   %}
7610 
7611   ins_pipe(pipe_class_default);
7612 %}
7613 
7614 // Store Instructions
7615 
7616 // Store CMS card-mark Immediate
7617 instruct storeimmCM0(immI0 zero, memory mem)
7618 %{
7619   match(Set mem (StoreCM mem zero));
7620   predicate(unnecessary_storestore(n));
7621 
7622   ins_cost(INSN_COST);
7623   format %{ "strb zr, $mem\t# byte" %}
7624 
7625   ins_encode(aarch64_enc_strb0(mem));
7626 
7627   ins_pipe(istore_mem);
7628 %}
7629 
// Store CMS card-mark Immediate with intervening StoreStore barrier,
// needed when using CMS with no conditional card marking
7632 instruct storeimmCM0_ordered(immI0 zero, memory mem)
7633 %{
7634   match(Set mem (StoreCM mem zero));
7635 
7636   ins_cost(INSN_COST * 2);
7637   format %{ "dmb ishst"
7638       "\n\tstrb zr, $mem\t# byte" %}
7639 
7640   ins_encode(aarch64_enc_strb0_ordered(mem));
7641 
7642   ins_pipe(istore_mem);
7643 %}
7644 
7645 // Store Byte
7646 instruct storeB(iRegIorL2I src, memory mem)
7647 %{
7648   match(Set mem (StoreB mem src));
7649   predicate(!needs_releasing_store(n));
7650 
7651   ins_cost(INSN_COST);
7652   format %{ "strb  $src, $mem\t# byte" %}
7653 
7654   ins_encode(aarch64_enc_strb(src, mem));
7655 
7656   ins_pipe(istore_reg_mem);
7657 %}
7658 
7659 
7660 instruct storeimmB0(immI0 zero, memory mem)
7661 %{
7662   match(Set mem (StoreB mem zero));
7663   predicate(!needs_releasing_store(n));
7664 
7665   ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}
7667 
7668   ins_encode(aarch64_enc_strb0(mem));
7669 
7670   ins_pipe(istore_mem);
7671 %}
7672 
7673 // Store Char/Short
7674 instruct storeC(iRegIorL2I src, memory mem)
7675 %{
7676   match(Set mem (StoreC mem src));
7677   predicate(!needs_releasing_store(n));
7678 
7679   ins_cost(INSN_COST);
7680   format %{ "strh  $src, $mem\t# short" %}
7681 
7682   ins_encode(aarch64_enc_strh(src, mem));
7683 
7684   ins_pipe(istore_reg_mem);
7685 %}
7686 
7687 instruct storeimmC0(immI0 zero, memory mem)
7688 %{
7689   match(Set mem (StoreC mem zero));
7690   predicate(!needs_releasing_store(n));
7691 
7692   ins_cost(INSN_COST);
7693   format %{ "strh  zr, $mem\t# short" %}
7694 
7695   ins_encode(aarch64_enc_strh0(mem));
7696 
7697   ins_pipe(istore_mem);
7698 %}
7699 
7700 // Store Integer
7701 
7702 instruct storeI(iRegIorL2I src, memory mem)
7703 %{
  match(Set mem (StoreI mem src));
7705   predicate(!needs_releasing_store(n));
7706 
7707   ins_cost(INSN_COST);
7708   format %{ "strw  $src, $mem\t# int" %}
7709 
7710   ins_encode(aarch64_enc_strw(src, mem));
7711 
7712   ins_pipe(istore_reg_mem);
7713 %}
7714 
7715 instruct storeimmI0(immI0 zero, memory mem)
7716 %{
  match(Set mem (StoreI mem zero));
7718   predicate(!needs_releasing_store(n));
7719 
7720   ins_cost(INSN_COST);
7721   format %{ "strw  zr, $mem\t# int" %}
7722 
7723   ins_encode(aarch64_enc_strw0(mem));
7724 
7725   ins_pipe(istore_mem);
7726 %}
7727 
7728 // Store Long (64 bit signed)
7729 instruct storeL(iRegL src, memory mem)
7730 %{
7731   match(Set mem (StoreL mem src));
7732   predicate(!needs_releasing_store(n));
7733 
7734   ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# long" %}
7736 
7737   ins_encode(aarch64_enc_str(src, mem));
7738 
7739   ins_pipe(istore_reg_mem);
7740 %}
7741 
// Store Long Zero
7743 instruct storeimmL0(immL0 zero, memory mem)
7744 %{
7745   match(Set mem (StoreL mem zero));
7746   predicate(!needs_releasing_store(n));
7747 
7748   ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# long" %}
7750 
7751   ins_encode(aarch64_enc_str0(mem));
7752 
7753   ins_pipe(istore_mem);
7754 %}
7755 
7756 // Store Pointer
7757 instruct storeP(iRegP src, memory mem)
7758 %{
7759   match(Set mem (StoreP mem src));
7760   predicate(!needs_releasing_store(n));
7761 
7762   ins_cost(INSN_COST);
7763   format %{ "str  $src, $mem\t# ptr" %}
7764 
7765   ins_encode(aarch64_enc_str(src, mem));
7766 
7767   ins_pipe(istore_reg_mem);
7768 %}
7769 
// Store Null Pointer
7771 instruct storeimmP0(immP0 zero, memory mem)
7772 %{
7773   match(Set mem (StoreP mem zero));
7774   predicate(!needs_releasing_store(n));
7775 
7776   ins_cost(INSN_COST);
7777   format %{ "str zr, $mem\t# ptr" %}
7778 
7779   ins_encode(aarch64_enc_str0(mem));
7780 
7781   ins_pipe(istore_mem);
7782 %}
7783 
7784 // Store Compressed Pointer
7785 instruct storeN(iRegN src, memory mem)
7786 %{
7787   match(Set mem (StoreN mem src));
7788   predicate(!needs_releasing_store(n));
7789 
7790   ins_cost(INSN_COST);
7791   format %{ "strw  $src, $mem\t# compressed ptr" %}
7792 
7793   ins_encode(aarch64_enc_strw(src, mem));
7794 
7795   ins_pipe(istore_reg_mem);
7796 %}
7797 
7798 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
7799 %{
7800   match(Set mem (StoreN mem zero));
7801   predicate(Universe::narrow_oop_base() == NULL &&
7802             Universe::narrow_klass_base() == NULL &&
7803             (!needs_releasing_store(n)));
7804 
7805   ins_cost(INSN_COST);
7806   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
7807 
7808   ins_encode(aarch64_enc_strw(heapbase, mem));
7809 
7810   ins_pipe(istore_reg_mem);
7811 %}
7812 
7813 // Store Float
7814 instruct storeF(vRegF src, memory mem)
7815 %{
7816   match(Set mem (StoreF mem src));
7817   predicate(!needs_releasing_store(n));
7818 
7819   ins_cost(INSN_COST);
7820   format %{ "strs  $src, $mem\t# float" %}
7821 
7822   ins_encode( aarch64_enc_strs(src, mem) );
7823 
7824   ins_pipe(pipe_class_memory);
7825 %}
7826 
7827 // TODO
7828 // implement storeImmF0 and storeFImmPacked
7829 
7830 // Store Double
7831 instruct storeD(vRegD src, memory mem)
7832 %{
7833   match(Set mem (StoreD mem src));
7834   predicate(!needs_releasing_store(n));
7835 
7836   ins_cost(INSN_COST);
7837   format %{ "strd  $src, $mem\t# double" %}
7838 
7839   ins_encode( aarch64_enc_strd(src, mem) );
7840 
7841   ins_pipe(pipe_class_memory);
7842 %}
7843 
7844 // Store Compressed Klass Pointer
7845 instruct storeNKlass(iRegN src, memory mem)
7846 %{
7847   predicate(!needs_releasing_store(n));
7848   match(Set mem (StoreNKlass mem src));
7849 
7850   ins_cost(INSN_COST);
7851   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7852 
7853   ins_encode(aarch64_enc_strw(src, mem));
7854 
7855   ins_pipe(istore_reg_mem);
7856 %}
7857 
7858 // TODO
7859 // implement storeImmD0 and storeDImmPacked
7860 
// Prefetch instructions
7862 // Must be safe to execute with invalid address (cannot fault).
7863 
7864 instruct prefetchalloc( memory mem ) %{
7865   match(PrefetchAllocation mem);
7866 
7867   ins_cost(INSN_COST);
7868   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7869 
7870   ins_encode( aarch64_enc_prefetchw(mem) );
7871 
7872   ins_pipe(iload_prefetch);
7873 %}
7874 
7875 //  ---------------- volatile loads and stores ----------------
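//
// The plain load/store rules above are guarded by predicates such as
// !needs_acquiring_load(n) and !needs_releasing_store(n), so when
// acquire or release semantics are required only the rules below can
// match. For example, a volatile Java int read is emitted as "ldarw"
// rather than "ldrw".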
7876 
7877 // Load Byte (8 bit signed)
7878 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7879 %{
7880   match(Set dst (LoadB mem));
7881 
7882   ins_cost(VOLATILE_REF_COST);
7883   format %{ "ldarsb  $dst, $mem\t# byte" %}
7884 
7885   ins_encode(aarch64_enc_ldarsb(dst, mem));
7886 
7887   ins_pipe(pipe_serial);
7888 %}
7889 
7890 // Load Byte (8 bit signed) into long
7891 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7892 %{
7893   match(Set dst (ConvI2L (LoadB mem)));
7894 
7895   ins_cost(VOLATILE_REF_COST);
7896   format %{ "ldarsb  $dst, $mem\t# byte" %}
7897 
7898   ins_encode(aarch64_enc_ldarsb(dst, mem));
7899 
7900   ins_pipe(pipe_serial);
7901 %}
7902 
7903 // Load Byte (8 bit unsigned)
7904 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7905 %{
7906   match(Set dst (LoadUB mem));
7907 
7908   ins_cost(VOLATILE_REF_COST);
7909   format %{ "ldarb  $dst, $mem\t# byte" %}
7910 
7911   ins_encode(aarch64_enc_ldarb(dst, mem));
7912 
7913   ins_pipe(pipe_serial);
7914 %}
7915 
7916 // Load Byte (8 bit unsigned) into long
7917 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7918 %{
7919   match(Set dst (ConvI2L (LoadUB mem)));
7920 
7921   ins_cost(VOLATILE_REF_COST);
7922   format %{ "ldarb  $dst, $mem\t# byte" %}
7923 
7924   ins_encode(aarch64_enc_ldarb(dst, mem));
7925 
7926   ins_pipe(pipe_serial);
7927 %}
7928 
7929 // Load Short (16 bit signed)
7930 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7931 %{
7932   match(Set dst (LoadS mem));
7933 
7934   ins_cost(VOLATILE_REF_COST);
7935   format %{ "ldarshw  $dst, $mem\t# short" %}
7936 
7937   ins_encode(aarch64_enc_ldarshw(dst, mem));
7938 
7939   ins_pipe(pipe_serial);
7940 %}
7941 
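// Load Char (16 bit unsigned)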
7942 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7943 %{
7944   match(Set dst (LoadUS mem));
7945 
7946   ins_cost(VOLATILE_REF_COST);
7947   format %{ "ldarhw  $dst, $mem\t# short" %}
7948 
7949   ins_encode(aarch64_enc_ldarhw(dst, mem));
7950 
7951   ins_pipe(pipe_serial);
7952 %}
7953 
7954 // Load Short/Char (16 bit unsigned) into long
7955 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7956 %{
7957   match(Set dst (ConvI2L (LoadUS mem)));
7958 
7959   ins_cost(VOLATILE_REF_COST);
7960   format %{ "ldarh  $dst, $mem\t# short" %}
7961 
7962   ins_encode(aarch64_enc_ldarh(dst, mem));
7963 
7964   ins_pipe(pipe_serial);
7965 %}
7966 
// Load Short (16 bit signed) into long
7968 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7969 %{
7970   match(Set dst (ConvI2L (LoadS mem)));
7971 
7972   ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}
7974 
7975   ins_encode(aarch64_enc_ldarsh(dst, mem));
7976 
7977   ins_pipe(pipe_serial);
7978 %}
7979 
7980 // Load Integer (32 bit signed)
7981 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7982 %{
7983   match(Set dst (LoadI mem));
7984 
7985   ins_cost(VOLATILE_REF_COST);
7986   format %{ "ldarw  $dst, $mem\t# int" %}
7987 
7988   ins_encode(aarch64_enc_ldarw(dst, mem));
7989 
7990   ins_pipe(pipe_serial);
7991 %}
7992 
7993 // Load Integer (32 bit unsigned) into long
7994 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7995 %{
7996   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7997 
7998   ins_cost(VOLATILE_REF_COST);
7999   format %{ "ldarw  $dst, $mem\t# int" %}
8000 
8001   ins_encode(aarch64_enc_ldarw(dst, mem));
8002 
8003   ins_pipe(pipe_serial);
8004 %}
8005 
8006 // Load Long (64 bit signed)
8007 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8008 %{
8009   match(Set dst (LoadL mem));
8010 
8011   ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}
8013 
8014   ins_encode(aarch64_enc_ldar(dst, mem));
8015 
8016   ins_pipe(pipe_serial);
8017 %}
8018 
8019 // Load Pointer
8020 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
8021 %{
8022   match(Set dst (LoadP mem));
8023 
8024   ins_cost(VOLATILE_REF_COST);
8025   format %{ "ldar  $dst, $mem\t# ptr" %}
8026 
8027   ins_encode(aarch64_enc_ldar(dst, mem));
8028 
8029   ins_pipe(pipe_serial);
8030 %}
8031 
8032 // Load Compressed Pointer
8033 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
8034 %{
8035   match(Set dst (LoadN mem));
8036 
8037   ins_cost(VOLATILE_REF_COST);
8038   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
8039 
8040   ins_encode(aarch64_enc_ldarw(dst, mem));
8041 
8042   ins_pipe(pipe_serial);
8043 %}
8044 
8045 // Load Float
8046 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
8047 %{
8048   match(Set dst (LoadF mem));
8049 
8050   ins_cost(VOLATILE_REF_COST);
8051   format %{ "ldars  $dst, $mem\t# float" %}
8052 
8053   ins_encode( aarch64_enc_fldars(dst, mem) );
8054 
8055   ins_pipe(pipe_serial);
8056 %}
8057 
8058 // Load Double
8059 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
8060 %{
8061   match(Set dst (LoadD mem));
8062 
8063   ins_cost(VOLATILE_REF_COST);
8064   format %{ "ldard  $dst, $mem\t# double" %}
8065 
8066   ins_encode( aarch64_enc_fldard(dst, mem) );
8067 
8068   ins_pipe(pipe_serial);
8069 %}
8070 
8071 // Store Byte
8072 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8073 %{
8074   match(Set mem (StoreB mem src));
8075 
8076   ins_cost(VOLATILE_REF_COST);
8077   format %{ "stlrb  $src, $mem\t# byte" %}
8078 
8079   ins_encode(aarch64_enc_stlrb(src, mem));
8080 
8081   ins_pipe(pipe_class_memory);
8082 %}
8083 
8084 // Store Char/Short
8085 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8086 %{
8087   match(Set mem (StoreC mem src));
8088 
8089   ins_cost(VOLATILE_REF_COST);
8090   format %{ "stlrh  $src, $mem\t# short" %}
8091 
8092   ins_encode(aarch64_enc_stlrh(src, mem));
8093 
8094   ins_pipe(pipe_class_memory);
8095 %}
8096 
8097 // Store Integer
8098 
8099 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8100 %{
  match(Set mem (StoreI mem src));
8102 
8103   ins_cost(VOLATILE_REF_COST);
8104   format %{ "stlrw  $src, $mem\t# int" %}
8105 
8106   ins_encode(aarch64_enc_stlrw(src, mem));
8107 
8108   ins_pipe(pipe_class_memory);
8109 %}
8110 
8111 // Store Long (64 bit signed)
8112 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
8113 %{
8114   match(Set mem (StoreL mem src));
8115 
8116   ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}
8118 
8119   ins_encode(aarch64_enc_stlr(src, mem));
8120 
8121   ins_pipe(pipe_class_memory);
8122 %}
8123 
8124 // Store Pointer
8125 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
8126 %{
8127   match(Set mem (StoreP mem src));
8128 
8129   ins_cost(VOLATILE_REF_COST);
8130   format %{ "stlr  $src, $mem\t# ptr" %}
8131 
8132   ins_encode(aarch64_enc_stlr(src, mem));
8133 
8134   ins_pipe(pipe_class_memory);
8135 %}
8136 
8137 // Store Compressed Pointer
8138 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
8139 %{
8140   match(Set mem (StoreN mem src));
8141 
8142   ins_cost(VOLATILE_REF_COST);
8143   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
8144 
8145   ins_encode(aarch64_enc_stlrw(src, mem));
8146 
8147   ins_pipe(pipe_class_memory);
8148 %}
8149 
8150 // Store Float
8151 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
8152 %{
8153   match(Set mem (StoreF mem src));
8154 
8155   ins_cost(VOLATILE_REF_COST);
8156   format %{ "stlrs  $src, $mem\t# float" %}
8157 
8158   ins_encode( aarch64_enc_fstlrs(src, mem) );
8159 
8160   ins_pipe(pipe_class_memory);
8161 %}
8162 
8163 // TODO
8164 // implement storeImmF0 and storeFImmPacked
8165 
8166 // Store Double
8167 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
8168 %{
8169   match(Set mem (StoreD mem src));
8170 
8171   ins_cost(VOLATILE_REF_COST);
8172   format %{ "stlrd  $src, $mem\t# double" %}
8173 
8174   ins_encode( aarch64_enc_fstlrd(src, mem) );
8175 
8176   ins_pipe(pipe_class_memory);
8177 %}
8178 
8179 //  ---------------- end of volatile loads and stores ----------------
8180 
8181 // ============================================================================
8182 // BSWAP Instructions
8183 
8184 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8185   match(Set dst (ReverseBytesI src));
8186 
8187   ins_cost(INSN_COST);
8188   format %{ "revw  $dst, $src" %}
8189 
8190   ins_encode %{
8191     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8192   %}
8193 
8194   ins_pipe(ialu_reg);
8195 %}
8196 
8197 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8198   match(Set dst (ReverseBytesL src));
8199 
8200   ins_cost(INSN_COST);
8201   format %{ "rev  $dst, $src" %}
8202 
8203   ins_encode %{
8204     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8205   %}
8206 
8207   ins_pipe(ialu_reg);
8208 %}
8209 
8210 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8211   match(Set dst (ReverseBytesUS src));
8212 
8213   ins_cost(INSN_COST);
8214   format %{ "rev16w  $dst, $src" %}
8215 
8216   ins_encode %{
8217     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8218   %}
8219 
8220   ins_pipe(ialu_reg);
8221 %}
8222 
8223 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
8224   match(Set dst (ReverseBytesS src));
8225 
8226   ins_cost(INSN_COST);
8227   format %{ "rev16w  $dst, $src\n\t"
8228             "sbfmw $dst, $dst, #0, #15" %}
8229 
8230   ins_encode %{
8231     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8232     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
8233   %}
8234 
8235   ins_pipe(ialu_reg);
8236 %}
8237 
8238 // ============================================================================
8239 // Zero Count Instructions
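//
// AArch64 provides CLZ but no count-trailing-zeros instruction, so the
// trailing-zero rules below bit-reverse with RBIT and then apply CLZ.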
8240 
8241 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8242   match(Set dst (CountLeadingZerosI src));
8243 
8244   ins_cost(INSN_COST);
8245   format %{ "clzw  $dst, $src" %}
8246   ins_encode %{
8247     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
8248   %}
8249 
8250   ins_pipe(ialu_reg);
8251 %}
8252 
8253 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
8254   match(Set dst (CountLeadingZerosL src));
8255 
8256   ins_cost(INSN_COST);
8257   format %{ "clz   $dst, $src" %}
8258   ins_encode %{
8259     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
8260   %}
8261 
8262   ins_pipe(ialu_reg);
8263 %}
8264 
8265 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8266   match(Set dst (CountTrailingZerosI src));
8267 
8268   ins_cost(INSN_COST * 2);
8269   format %{ "rbitw  $dst, $src\n\t"
8270             "clzw   $dst, $dst" %}
8271   ins_encode %{
8272     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
8273     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
8274   %}
8275 
8276   ins_pipe(ialu_reg);
8277 %}
8278 
8279 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
8280   match(Set dst (CountTrailingZerosL src));
8281 
8282   ins_cost(INSN_COST * 2);
8283   format %{ "rbit   $dst, $src\n\t"
8284             "clz    $dst, $dst" %}
8285   ins_encode %{
8286     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
8287     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
8288   %}
8289 
8290   ins_pipe(ialu_reg);
8291 %}
8292 
8293 //---------- Population Count Instructions -------------------------------------
8294 //
8295 
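// AArch64 has no scalar population-count instruction, so these rules
// move the operand into a SIMD register, count bits per byte with CNT
// (8B arrangement), sum the byte counts with ADDV, and move the result
// back to a general-purpose register.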
8296 instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
8297   predicate(UsePopCountInstruction);
8298   match(Set dst (PopCountI src));
8299   effect(TEMP tmp);
8300   ins_cost(INSN_COST * 13);
8301 
8302   format %{ "movw   $src, $src\n\t"
8303             "mov    $tmp, $src\t# vector (1D)\n\t"
8304             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8305             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8306             "mov    $dst, $tmp\t# vector (1D)" %}
8307   ins_encode %{
8308     __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
8309     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8310     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8311     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8312     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8313   %}
8314 
8315   ins_pipe(pipe_class_default);
8316 %}
8317 
8318 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
8319   predicate(UsePopCountInstruction);
8320   match(Set dst (PopCountI (LoadI mem)));
8321   effect(TEMP tmp);
8322   ins_cost(INSN_COST * 13);
8323 
8324   format %{ "ldrs   $tmp, $mem\n\t"
8325             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8326             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8327             "mov    $dst, $tmp\t# vector (1D)" %}
8328   ins_encode %{
8329     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8330     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
8331                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8332     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8333     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8334     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8335   %}
8336 
8337   ins_pipe(pipe_class_default);
8338 %}
8339 
8340 // Note: Long.bitCount(long) returns an int.
8341 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
8342   predicate(UsePopCountInstruction);
8343   match(Set dst (PopCountL src));
8344   effect(TEMP tmp);
8345   ins_cost(INSN_COST * 13);
8346 
8347   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
8348             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8349             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8350             "mov    $dst, $tmp\t# vector (1D)" %}
8351   ins_encode %{
8352     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8353     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8354     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8355     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8356   %}
8357 
8358   ins_pipe(pipe_class_default);
8359 %}
8360 
8361 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
8362   predicate(UsePopCountInstruction);
8363   match(Set dst (PopCountL (LoadL mem)));
8364   effect(TEMP tmp);
8365   ins_cost(INSN_COST * 13);
8366 
8367   format %{ "ldrd   $tmp, $mem\n\t"
8368             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8369             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8370             "mov    $dst, $tmp\t# vector (1D)" %}
8371   ins_encode %{
8372     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8373     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
8374                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8375     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8376     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8377     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8378   %}
8379 
8380   ins_pipe(pipe_class_default);
8381 %}
8382 
8383 // ============================================================================
8384 // MemBar Instruction
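//
// The barrier flavours used below typically map to AArch64 barrier
// instructions as follows (a sketch only -- the exact choice is made
// by MacroAssembler::membar):
//
//   LoadLoad|LoadStore   (acquire)     dmb ishld
//   LoadStore|StoreStore (release)     dmb ish
//   StoreStore                         dmb ishst
//   StoreLoad            (volatile)    dmb ish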
8385 
8386 instruct load_fence() %{
8387   match(LoadFence);
8388   ins_cost(VOLATILE_REF_COST);
8389 
8390   format %{ "load_fence" %}
8391 
8392   ins_encode %{
8393     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8394   %}
8395   ins_pipe(pipe_serial);
8396 %}
8397 
8398 instruct unnecessary_membar_acquire() %{
8399   predicate(unnecessary_acquire(n));
8400   match(MemBarAcquire);
8401   ins_cost(0);
8402 
8403   format %{ "membar_acquire (elided)" %}
8404 
8405   ins_encode %{
8406     __ block_comment("membar_acquire (elided)");
8407   %}
8408 
8409   ins_pipe(pipe_class_empty);
8410 %}
8411 
8412 instruct membar_acquire() %{
8413   match(MemBarAcquire);
8414   ins_cost(VOLATILE_REF_COST);
8415 
8416   format %{ "membar_acquire" %}
8417 
8418   ins_encode %{
8419     __ block_comment("membar_acquire");
8420     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8421   %}
8422 
8423   ins_pipe(pipe_serial);
8424 %}
8425 
8426 
8427 instruct membar_acquire_lock() %{
8428   match(MemBarAcquireLock);
8429   ins_cost(VOLATILE_REF_COST);
8430 
8431   format %{ "membar_acquire_lock (elided)" %}
8432 
8433   ins_encode %{
8434     __ block_comment("membar_acquire_lock (elided)");
8435   %}
8436 
8437   ins_pipe(pipe_serial);
8438 %}
8439 
8440 instruct store_fence() %{
8441   match(StoreFence);
8442   ins_cost(VOLATILE_REF_COST);
8443 
8444   format %{ "store_fence" %}
8445 
8446   ins_encode %{
8447     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8448   %}
8449   ins_pipe(pipe_serial);
8450 %}
8451 
8452 instruct unnecessary_membar_release() %{
8453   predicate(unnecessary_release(n));
8454   match(MemBarRelease);
8455   ins_cost(0);
8456 
8457   format %{ "membar_release (elided)" %}
8458 
8459   ins_encode %{
8460     __ block_comment("membar_release (elided)");
8461   %}
8462   ins_pipe(pipe_serial);
8463 %}
8464 
8465 instruct membar_release() %{
8466   match(MemBarRelease);
8467   ins_cost(VOLATILE_REF_COST);
8468 
8469   format %{ "membar_release" %}
8470 
8471   ins_encode %{
8472     __ block_comment("membar_release");
8473     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8474   %}
8475   ins_pipe(pipe_serial);
8476 %}
8477 
8478 instruct membar_storestore() %{
8479   match(MemBarStoreStore);
8480   ins_cost(VOLATILE_REF_COST);
8481 
  format %{ "membar_storestore" %}
8483 
8484   ins_encode %{
8485     __ membar(Assembler::StoreStore);
8486   %}
8487   ins_pipe(pipe_serial);
8488 %}
8489 
8490 instruct membar_release_lock() %{
8491   match(MemBarReleaseLock);
8492   ins_cost(VOLATILE_REF_COST);
8493 
8494   format %{ "membar_release_lock (elided)" %}
8495 
8496   ins_encode %{
8497     __ block_comment("membar_release_lock (elided)");
8498   %}
8499 
8500   ins_pipe(pipe_serial);
8501 %}
8502 
8503 instruct unnecessary_membar_volatile() %{
8504   predicate(unnecessary_volatile(n));
8505   match(MemBarVolatile);
8506   ins_cost(0);
8507 
8508   format %{ "membar_volatile (elided)" %}
8509 
8510   ins_encode %{
8511     __ block_comment("membar_volatile (elided)");
8512   %}
8513 
8514   ins_pipe(pipe_serial);
8515 %}
8516 
8517 instruct membar_volatile() %{
8518   match(MemBarVolatile);
8519   ins_cost(VOLATILE_REF_COST*100);
8520 
8521   format %{ "membar_volatile" %}
8522 
8523   ins_encode %{
8524     __ block_comment("membar_volatile");
8525     __ membar(Assembler::StoreLoad);
8526   %}
8527 
8528   ins_pipe(pipe_serial);
8529 %}
8530 
8531 // ============================================================================
8532 // Cast/Convert Instructions
8533 
8534 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8535   match(Set dst (CastX2P src));
8536 
8537   ins_cost(INSN_COST);
8538   format %{ "mov $dst, $src\t# long -> ptr" %}
8539 
8540   ins_encode %{
8541     if ($dst$$reg != $src$$reg) {
8542       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8543     }
8544   %}
8545 
8546   ins_pipe(ialu_reg);
8547 %}
8548 
8549 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8550   match(Set dst (CastP2X src));
8551 
8552   ins_cost(INSN_COST);
8553   format %{ "mov $dst, $src\t# ptr -> long" %}
8554 
8555   ins_encode %{
8556     if ($dst$$reg != $src$$reg) {
8557       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8558     }
8559   %}
8560 
8561   ins_pipe(ialu_reg);
8562 %}
8563 
// Convert oop into int for vector alignment masking
8565 instruct convP2I(iRegINoSp dst, iRegP src) %{
8566   match(Set dst (ConvL2I (CastP2X src)));
8567 
8568   ins_cost(INSN_COST);
8569   format %{ "movw $dst, $src\t# ptr -> int" %}
8570   ins_encode %{
8571     __ movw($dst$$Register, $src$$Register);
8572   %}
8573 
8574   ins_pipe(ialu_reg);
8575 %}
8576 
// Convert compressed oop into int for vector alignment masking
// in the case of 32-bit oops (heap < 4GB).
8579 instruct convN2I(iRegINoSp dst, iRegN src)
8580 %{
8581   predicate(Universe::narrow_oop_shift() == 0);
8582   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
8583 
8584   ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
8586   ins_encode %{
8587     __ movw($dst$$Register, $src$$Register);
8588   %}
8589 
8590   ins_pipe(ialu_reg);
8591 %}
8592 
8593 
8594 // Convert oop pointer into compressed form
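//
// With the default 8-byte object alignment and a non-zero heap base,
// the encoding amounts to roughly the following (a sketch only --
// encode_heap_oop also handles the zero-base, zero-shift and null
// cases):
//
//   sub  dst, src, rheapbase
//   lsr  dst, dst, #LogMinObjAlignmentInBytes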
8595 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
8596   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
8597   match(Set dst (EncodeP src));
8598   effect(KILL cr);
8599   ins_cost(INSN_COST * 3);
8600   format %{ "encode_heap_oop $dst, $src" %}
8601   ins_encode %{
8602     Register s = $src$$Register;
8603     Register d = $dst$$Register;
8604     __ encode_heap_oop(d, s);
8605   %}
8606   ins_pipe(ialu_reg);
8607 %}
8608 
8609 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
8610   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
8611   match(Set dst (EncodeP src));
8612   ins_cost(INSN_COST * 3);
8613   format %{ "encode_heap_oop_not_null $dst, $src" %}
8614   ins_encode %{
8615     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
8616   %}
8617   ins_pipe(ialu_reg);
8618 %}
8619 
8620 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
8621   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
8622             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
8623   match(Set dst (DecodeN src));
8624   ins_cost(INSN_COST * 3);
8625   format %{ "decode_heap_oop $dst, $src" %}
8626   ins_encode %{
8627     Register s = $src$$Register;
8628     Register d = $dst$$Register;
8629     __ decode_heap_oop(d, s);
8630   %}
8631   ins_pipe(ialu_reg);
8632 %}
8633 
8634 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
8635   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
8636             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
8637   match(Set dst (DecodeN src));
8638   ins_cost(INSN_COST * 3);
8639   format %{ "decode_heap_oop_not_null $dst, $src" %}
8640   ins_encode %{
8641     Register s = $src$$Register;
8642     Register d = $dst$$Register;
8643     __ decode_heap_oop_not_null(d, s);
8644   %}
8645   ins_pipe(ialu_reg);
8646 %}
8647 
8648 // n.b. AArch64 implementations of encode_klass_not_null and
8649 // decode_klass_not_null do not modify the flags register so, unlike
8650 // Intel, we don't kill CR as a side effect here
8651 
8652 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
8653   match(Set dst (EncodePKlass src));
8654 
8655   ins_cost(INSN_COST * 3);
8656   format %{ "encode_klass_not_null $dst,$src" %}
8657 
8658   ins_encode %{
8659     Register src_reg = as_Register($src$$reg);
8660     Register dst_reg = as_Register($dst$$reg);
8661     __ encode_klass_not_null(dst_reg, src_reg);
8662   %}
8663 
  ins_pipe(ialu_reg);
8665 %}
8666 
8667 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
8668   match(Set dst (DecodeNKlass src));
8669 
8670   ins_cost(INSN_COST * 3);
8671   format %{ "decode_klass_not_null $dst,$src" %}
8672 
8673   ins_encode %{
8674     Register src_reg = as_Register($src$$reg);
8675     Register dst_reg = as_Register($dst$$reg);
8676     if (dst_reg != src_reg) {
8677       __ decode_klass_not_null(dst_reg, src_reg);
8678     } else {
8679       __ decode_klass_not_null(dst_reg);
8680     }
8681   %}
8682 
  ins_pipe(ialu_reg);
8684 %}
8685 
8686 instruct checkCastPP(iRegPNoSp dst)
8687 %{
8688   match(Set dst (CheckCastPP dst));
8689 
8690   size(0);
8691   format %{ "# checkcastPP of $dst" %}
8692   ins_encode(/* empty encoding */);
8693   ins_pipe(pipe_class_empty);
8694 %}
8695 
8696 instruct castPP(iRegPNoSp dst)
8697 %{
8698   match(Set dst (CastPP dst));
8699 
8700   size(0);
8701   format %{ "# castPP of $dst" %}
8702   ins_encode(/* empty encoding */);
8703   ins_pipe(pipe_class_empty);
8704 %}
8705 
8706 instruct castII(iRegI dst)
8707 %{
8708   match(Set dst (CastII dst));
8709 
8710   size(0);
8711   format %{ "# castII of $dst" %}
8712   ins_encode(/* empty encoding */);
8713   ins_cost(0);
8714   ins_pipe(pipe_class_empty);
8715 %}
8716 
8717 // ============================================================================
8718 // Atomic operation instructions
8719 //
// Intel and SPARC both implement Ideal Node LoadPLocked and
// Store{PIL}Conditional instructions using a normal load for the
// LoadPLocked and a CAS for the Store{PIL}Conditional.
//
// The ideal code appears only to use LoadPLocked/StorePConditional as
// a pair to lock object allocations from Eden space when not using
// TLABs.
//
// There does not appear to be a Load{IL}Locked Ideal Node and the
// Ideal code appears to use Store{IL}Conditional as an alias for CAS
// and to use StoreIConditional only for 32-bit and StoreLConditional
// only for 64-bit.
//
// We implement LoadPLocked and StorePConditional using, respectively,
// the AArch64 hardware load-exclusive and store-conditional
// instructions. By contrast, we must implement each of
// Store{IL}Conditional using a CAS, which employs a pair of
// instructions comprising a load-exclusive followed by a
// store-conditional.
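//
// For reference, such a CAS expands into a retry loop along these
// lines (a sketch only -- the real sequence is emitted by
// MacroAssembler::cmpxchg):
//
//   retry:
//     ldaxr rscratch1, [addr]            // load-exclusive, acquire
//     cmp   rscratch1, oldval
//     b.ne  done                         // current value differs
//     stlxr rscratch1, newval, [addr]    // store-conditional, release
//     cbnz  rscratch1, retry             // lost exclusivity, retry
//   done: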
8739 
8740 
// Locked-load (linked load) of the current heap-top.
// Used when updating the eden heap top.
// Implemented using ldaxr on AArch64.
8744 
8745 instruct loadPLocked(iRegPNoSp dst, indirect mem)
8746 %{
8747   match(Set dst (LoadPLocked mem));
8748 
8749   ins_cost(VOLATILE_REF_COST);
8750 
8751   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
8752 
8753   ins_encode(aarch64_enc_ldaxr(dst, mem));
8754 
8755   ins_pipe(pipe_serial);
8756 %}
8757 
8758 // Conditional-store of the updated heap-top.
8759 // Used during allocation of the shared heap.
8760 // Sets flag (EQ) on success.
// Implemented using stlxr on AArch64.
8762 
8763 instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
8764 %{
8765   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
8766 
8767   ins_cost(VOLATILE_REF_COST);
8768 
  // TODO
  // do we need to do a store-conditional release or can we just use a
  // plain store-conditional?
8772 
8773   format %{
8774     "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
8775     "cmpw rscratch1, zr\t# EQ on successful write"
8776   %}
8777 
8778   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
8779 
8780   ins_pipe(pipe_serial);
8781 %}
8782 
8783 
8784 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8785 // when attempting to rebias a lock towards the current thread.  We
8786 // must use the acquire form of cmpxchg in order to guarantee acquire
8787 // semantics in this case.
8788 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
8789 %{
8790   match(Set cr (StoreLConditional mem (Binary oldval newval)));
8791 
8792   ins_cost(VOLATILE_REF_COST);
8793 
8794   format %{
8795     "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
8796     "cmpw rscratch1, zr\t# EQ on successful write"
8797   %}
8798 
8799   ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));
8800 
8801   ins_pipe(pipe_slow);
8802 %}
8803 
// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional. At the time of writing this
// comment storeIConditional was not used anywhere on AArch64.
8807 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
8808 %{
8809   match(Set cr (StoreIConditional mem (Binary oldval newval)));
8810 
8811   ins_cost(VOLATILE_REF_COST);
8812 
8813   format %{
8814     "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
8815     "cmpw rscratch1, zr\t# EQ on successful write"
8816   %}
8817 
8818   ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));
8819 
8820   ins_pipe(pipe_slow);
8821 %}
8822 
8823 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8824 // can't match them
8825 
8826 // standard CompareAndSwapX when we are using barriers
8827 // these have higher priority than the rules selected by a predicate
8828 
8829 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8830 
8831   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
8832   ins_cost(2 * VOLATILE_REF_COST);
8833 
8834   effect(KILL cr);
8835 
  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8843 
8844   ins_pipe(pipe_slow);
8845 %}
8846 
8847 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8848 
8849   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8850   ins_cost(2 * VOLATILE_REF_COST);
8851 
8852   effect(KILL cr);
8853 
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8861 
8862   ins_pipe(pipe_slow);
8863 %}
8864 
8865 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8866 
8867   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8868   ins_cost(2 * VOLATILE_REF_COST);
8869 
8870   effect(KILL cr);
8871 
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8879 
8880   ins_pipe(pipe_slow);
8881 %}
8882 
8883 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8884 
8885   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8886   ins_cost(2 * VOLATILE_REF_COST);
8887 
8888   effect(KILL cr);
8889 
  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8897 
8898   ins_pipe(pipe_slow);
8899 %}
8900 
8901 // alternative CompareAndSwapX when we are eliding barriers
8902 
8903 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8904 
8905   predicate(needs_acquiring_load_exclusive(n));
8906   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
8907   ins_cost(VOLATILE_REF_COST);
8908 
8909   effect(KILL cr);
8910 
  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8918 
8919   ins_pipe(pipe_slow);
8920 %}
8921 
8922 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8923 
8924   predicate(needs_acquiring_load_exclusive(n));
8925   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8926   ins_cost(VOLATILE_REF_COST);
8927 
8928   effect(KILL cr);
8929 
  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8937 
8938   ins_pipe(pipe_slow);
8939 %}
8940 
8941 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8942 
8943   predicate(needs_acquiring_load_exclusive(n));
8944   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8945   ins_cost(VOLATILE_REF_COST);
8946 
8947   effect(KILL cr);
8948 
  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8956 
8957   ins_pipe(pipe_slow);
8958 %}
8959 
8960 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8961 
8962   predicate(needs_acquiring_load_exclusive(n));
8963   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8964   ins_cost(VOLATILE_REF_COST);
8965 
8966   effect(KILL cr);
8967 
  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
8975 
8976   ins_pipe(pipe_slow);
8977 %}
8978 
8979 
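// The atomic_xchg{w} and atomic_add{w} calls used in the rules below
// expand into load-exclusive/store-exclusive retry loops, roughly as
// follows (a sketch only -- the real code lives in MacroAssembler):
//
//   retry:
//     ldxr  prev, [addr]
//     add   rscratch1, prev, incr       // xchg simply stores newv here
//     stxr  rscratch2, rscratch1, [addr]
//     cbnz  rscratch2, retry
//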
8980 instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
8981   match(Set prev (GetAndSetI mem newv));
8982   format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
8983   ins_encode %{
8984     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
8985   %}
8986   ins_pipe(pipe_serial);
8987 %}
8988 
8989 instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
8990   match(Set prev (GetAndSetL mem newv));
8991   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
8992   ins_encode %{
8993     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
8994   %}
8995   ins_pipe(pipe_serial);
8996 %}
8997 
8998 instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
8999   match(Set prev (GetAndSetN mem newv));
9000   format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
9001   ins_encode %{
9002     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9003   %}
9004   ins_pipe(pipe_serial);
9005 %}
9006 
9007 instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
9008   match(Set prev (GetAndSetP mem newv));
9009   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
9010   ins_encode %{
9011     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9012   %}
9013   ins_pipe(pipe_serial);
9014 %}
9015 
9016 
9017 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
9018   match(Set newval (GetAndAddL mem incr));
9019   ins_cost(INSN_COST * 10);
9020   format %{ "get_and_addL $newval, [$mem], $incr" %}
9021   ins_encode %{
9022     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
9023   %}
9024   ins_pipe(pipe_serial);
9025 %}
9026 
9027 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
9028   predicate(n->as_LoadStore()->result_not_used());
9029   match(Set dummy (GetAndAddL mem incr));
9030   ins_cost(INSN_COST * 9);
9031   format %{ "get_and_addL [$mem], $incr" %}
9032   ins_encode %{
9033     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
9034   %}
9035   ins_pipe(pipe_serial);
9036 %}
9037 
9038 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
9039   match(Set newval (GetAndAddL mem incr));
9040   ins_cost(INSN_COST * 10);
9041   format %{ "get_and_addL $newval, [$mem], $incr" %}
9042   ins_encode %{
9043     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
9044   %}
9045   ins_pipe(pipe_serial);
9046 %}
9047 
9048 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
9049   predicate(n->as_LoadStore()->result_not_used());
9050   match(Set dummy (GetAndAddL mem incr));
9051   ins_cost(INSN_COST * 9);
9052   format %{ "get_and_addL [$mem], $incr" %}
9053   ins_encode %{
9054     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
9055   %}
9056   ins_pipe(pipe_serial);
9057 %}
9058 
9059 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
9060   match(Set newval (GetAndAddI mem incr));
9061   ins_cost(INSN_COST * 10);
9062   format %{ "get_and_addI $newval, [$mem], $incr" %}
9063   ins_encode %{
9064     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
9065   %}
9066   ins_pipe(pipe_serial);
9067 %}
9068 
9069 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
9070   predicate(n->as_LoadStore()->result_not_used());
9071   match(Set dummy (GetAndAddI mem incr));
9072   ins_cost(INSN_COST * 9);
9073   format %{ "get_and_addI [$mem], $incr" %}
9074   ins_encode %{
9075     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
9076   %}
9077   ins_pipe(pipe_serial);
9078 %}
9079 
9080 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
9081   match(Set newval (GetAndAddI mem incr));
9082   ins_cost(INSN_COST * 10);
9083   format %{ "get_and_addI $newval, [$mem], $incr" %}
9084   ins_encode %{
9085     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
9086   %}
9087   ins_pipe(pipe_serial);
9088 %}
9089 
9090 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
9091   predicate(n->as_LoadStore()->result_not_used());
9092   match(Set dummy (GetAndAddI mem incr));
9093   ins_cost(INSN_COST * 9);
9094   format %{ "get_and_addI [$mem], $incr" %}
9095   ins_encode %{
9096     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
9097   %}
9098   ins_pipe(pipe_serial);
9099 %}
9100 
9101 // Manifest a CmpL result in an integer register.
9102 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
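//
// The three-instruction sequence used below works as follows:
//   cmp   src1, src2      // set flags
//   csetw dst, ne         // dst = (src1 != src2) ? 1 : 0
//   cnegw dst, dst, lt    // negate dst when src1 < src2
// leaving dst equal to -1, 0 or 1.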
9103 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
9104 %{
9105   match(Set dst (CmpL3 src1 src2));
9106   effect(KILL flags);
9107 
9108   ins_cost(INSN_COST * 6);
9109   format %{
9110       "cmp $src1, $src2"
9111       "csetw $dst, ne"
9112       "cnegw $dst, lt"
9113   %}
9115   ins_encode %{
9116     __ cmp($src1$$Register, $src2$$Register);
9117     __ csetw($dst$$Register, Assembler::NE);
9118     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
9119   %}
9120 
9121   ins_pipe(pipe_class_default);
9122 %}
9123 
9124 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
9125 %{
9126   match(Set dst (CmpL3 src1 src2));
9127   effect(KILL flags);
9128 
9129   ins_cost(INSN_COST * 6);
9130   format %{
9131       "cmp $src1, $src2"
9132       "csetw $dst, ne"
9133       "cnegw $dst, lt"
9134   %}
9135   ins_encode %{
9136     int32_t con = (int32_t)$src2$$constant;
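    // the 12-bit immediate in subs must be non-negative, so a negative
    // constant is compared by adding its negation instead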
    if (con < 0) {
9138       __ adds(zr, $src1$$Register, -con);
9139     } else {
9140       __ subs(zr, $src1$$Register, con);
9141     }
9142     __ csetw($dst$$Register, Assembler::NE);
9143     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
9144   %}
9145 
9146   ins_pipe(pipe_class_default);
9147 %}
9148 
9149 // ============================================================================
9150 // Conditional Move Instructions
9151 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). It would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method,
// which throws a ShouldNotHappen. So we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9161 
9162 instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9163   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9164 
9165   ins_cost(INSN_COST * 2);
9166   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}
9167 
9168   ins_encode %{
9169     __ cselw(as_Register($dst$$reg),
9170              as_Register($src2$$reg),
9171              as_Register($src1$$reg),
9172              (Assembler::Condition)$cmp$$cmpcode);
9173   %}
9174 
9175   ins_pipe(icond_reg_reg);
9176 %}
9177 
9178 instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9179   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9180 
9181   ins_cost(INSN_COST * 2);
9182   format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}
9183 
9184   ins_encode %{
9185     __ cselw(as_Register($dst$$reg),
9186              as_Register($src2$$reg),
9187              as_Register($src1$$reg),
9188              (Assembler::Condition)$cmp$$cmpcode);
9189   %}
9190 
9191   ins_pipe(icond_reg_reg);
9192 %}
9193 
9194 // special cases where one arg is zero
9195 
9196 // n.b. this is selected in preference to the rule above because it
9197 // avoids loading constant 0 into a source register
9198 
// TODO
// we ought to be able to cull all but one of these variants, as the
// ideal transforms ought always to order the zero consistently (to the
// left? to the right?)
9202 
9203 instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
9204   match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
9205 
9206   ins_cost(INSN_COST * 2);
9207   format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}
9208 
9209   ins_encode %{
9210     __ cselw(as_Register($dst$$reg),
9211              as_Register($src$$reg),
9212              zr,
9213              (Assembler::Condition)$cmp$$cmpcode);
9214   %}
9215 
9216   ins_pipe(icond_reg);
9217 %}
9218 
9219 instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
9220   match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
9221 
9222   ins_cost(INSN_COST * 2);
9223   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}
9224 
9225   ins_encode %{
9226     __ cselw(as_Register($dst$$reg),
9227              as_Register($src$$reg),
9228              zr,
9229              (Assembler::Condition)$cmp$$cmpcode);
9230   %}
9231 
9232   ins_pipe(icond_reg);
9233 %}
9234 
9235 instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9236   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9237 
9238   ins_cost(INSN_COST * 2);
9239   format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}
9240 
9241   ins_encode %{
9242     __ cselw(as_Register($dst$$reg),
9243              zr,
9244              as_Register($src$$reg),
9245              (Assembler::Condition)$cmp$$cmpcode);
9246   %}
9247 
9248   ins_pipe(icond_reg);
9249 %}
9250 
9251 instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9252   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9253 
9254   ins_cost(INSN_COST * 2);
9255   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}
9256 
9257   ins_encode %{
9258     __ cselw(as_Register($dst$$reg),
9259              zr,
9260              as_Register($src$$reg),
9261              (Assembler::Condition)$cmp$$cmpcode);
9262   %}
9263 
9264   ins_pipe(icond_reg);
9265 %}
9266 
9267 // special case for creating a boolean 0 or 1
9268 
9269 // n.b. this is selected in preference to the rule above because it
9270 // avoids loading constants 0 and 1 into a source register
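//
// csincw dst, zr, zr, cond computes (cond ? zr : zr + 1), i.e. 0 when
// the condition holds and 1 otherwise -- exactly the value the CMove
// with constant inputs (1, 0) requires.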
9271 
9272 instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9273   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9274 
9275   ins_cost(INSN_COST * 2);
9276   format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}
9277 
9278   ins_encode %{
9279     // equivalently
9280     // cset(as_Register($dst$$reg),
9281     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9282     __ csincw(as_Register($dst$$reg),
9283              zr,
9284              zr,
9285              (Assembler::Condition)$cmp$$cmpcode);
9286   %}
9287 
9288   ins_pipe(icond_none);
9289 %}
9290 
9291 instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9292   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9293 
9294   ins_cost(INSN_COST * 2);
9295   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
9296 
9297   ins_encode %{
9298     // equivalently
9299     // cset(as_Register($dst$$reg),
9300     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9301     __ csincw(as_Register($dst$$reg),
9302              zr,
9303              zr,
9304              (Assembler::Condition)$cmp$$cmpcode);
9305   %}
9306 
9307   ins_pipe(icond_none);
9308 %}
9309 
9310 instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9311   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9312 
9313   ins_cost(INSN_COST * 2);
9314   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}
9315 
9316   ins_encode %{
9317     __ csel(as_Register($dst$$reg),
9318             as_Register($src2$$reg),
9319             as_Register($src1$$reg),
9320             (Assembler::Condition)$cmp$$cmpcode);
9321   %}
9322 
9323   ins_pipe(icond_reg_reg);
9324 %}
9325 
9326 instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9327   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9328 
9329   ins_cost(INSN_COST * 2);
9330   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}
9331 
9332   ins_encode %{
9333     __ csel(as_Register($dst$$reg),
9334             as_Register($src2$$reg),
9335             as_Register($src1$$reg),
9336             (Assembler::Condition)$cmp$$cmpcode);
9337   %}
9338 
9339   ins_pipe(icond_reg_reg);
9340 %}
9341 
9342 // special cases where one arg is zero
9343 
9344 instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9345   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9346 
9347   ins_cost(INSN_COST * 2);
9348   format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}
9349 
9350   ins_encode %{
9351     __ csel(as_Register($dst$$reg),
9352             zr,
9353             as_Register($src$$reg),
9354             (Assembler::Condition)$cmp$$cmpcode);
9355   %}
9356 
9357   ins_pipe(icond_reg);
9358 %}
9359 
9360 instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9361   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9362 
9363   ins_cost(INSN_COST * 2);
9364   format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}
9365 
9366   ins_encode %{
9367     __ csel(as_Register($dst$$reg),
9368             zr,
9369             as_Register($src$$reg),
9370             (Assembler::Condition)$cmp$$cmpcode);
9371   %}
9372 
9373   ins_pipe(icond_reg);
9374 %}
9375 
9376 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9377   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9378 
9379   ins_cost(INSN_COST * 2);
9380   format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
9381 
9382   ins_encode %{
9383     __ csel(as_Register($dst$$reg),
9384             as_Register($src$$reg),
9385             zr,
9386             (Assembler::Condition)$cmp$$cmpcode);
9387   %}
9388 
9389   ins_pipe(icond_reg);
9390 %}
9391 
9392 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9393   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9394 
9395   ins_cost(INSN_COST * 2);
9396   format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
9397 
9398   ins_encode %{
9399     __ csel(as_Register($dst$$reg),
9400             as_Register($src$$reg),
9401             zr,
9402             (Assembler::Condition)$cmp$$cmpcode);
9403   %}
9404 
9405   ins_pipe(icond_reg);
9406 %}
9407 
9408 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9409   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9410 
9411   ins_cost(INSN_COST * 2);
9412   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
9413 
9414   ins_encode %{
9415     __ csel(as_Register($dst$$reg),
9416             as_Register($src2$$reg),
9417             as_Register($src1$$reg),
9418             (Assembler::Condition)$cmp$$cmpcode);
9419   %}
9420 
9421   ins_pipe(icond_reg_reg);
9422 %}
9423 
9424 instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9425   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9426 
9427   ins_cost(INSN_COST * 2);
9428   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}
9429 
9430   ins_encode %{
9431     __ csel(as_Register($dst$$reg),
9432             as_Register($src2$$reg),
9433             as_Register($src1$$reg),
9434             (Assembler::Condition)$cmp$$cmpcode);
9435   %}
9436 
9437   ins_pipe(icond_reg_reg);
9438 %}
9439 
9440 // special cases where one arg is zero
9441 
9442 instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9443   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9444 
9445   ins_cost(INSN_COST * 2);
9446   format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}
9447 
9448   ins_encode %{
9449     __ csel(as_Register($dst$$reg),
9450             zr,
9451             as_Register($src$$reg),
9452             (Assembler::Condition)$cmp$$cmpcode);
9453   %}
9454 
9455   ins_pipe(icond_reg);
9456 %}
9457 
9458 instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9459   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9460 
9461   ins_cost(INSN_COST * 2);
9462   format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}
9463 
9464   ins_encode %{
9465     __ csel(as_Register($dst$$reg),
9466             zr,
9467             as_Register($src$$reg),
9468             (Assembler::Condition)$cmp$$cmpcode);
9469   %}
9470 
9471   ins_pipe(icond_reg);
9472 %}
9473 
9474 instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
9475   match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
9476 
9477   ins_cost(INSN_COST * 2);
9478   format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}
9479 
9480   ins_encode %{
9481     __ csel(as_Register($dst$$reg),
9482             as_Register($src$$reg),
9483             zr,
9484             (Assembler::Condition)$cmp$$cmpcode);
9485   %}
9486 
9487   ins_pipe(icond_reg);
9488 %}
9489 
9490 instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
9491   match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
9492 
9493   ins_cost(INSN_COST * 2);
9494   format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}
9495 
9496   ins_encode %{
9497     __ csel(as_Register($dst$$reg),
9498             as_Register($src$$reg),
9499             zr,
9500             (Assembler::Condition)$cmp$$cmpcode);
9501   %}
9502 
9503   ins_pipe(icond_reg);
9504 %}
9505 
9506 instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
9507   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
9508 
9509   ins_cost(INSN_COST * 2);
9510   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
9511 
9512   ins_encode %{
9513     __ cselw(as_Register($dst$$reg),
9514              as_Register($src2$$reg),
9515              as_Register($src1$$reg),
9516              (Assembler::Condition)$cmp$$cmpcode);
9517   %}
9518 
9519   ins_pipe(icond_reg_reg);
9520 %}
9521 
9522 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
9523   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
9524 
9525   ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}
9527 
9528   ins_encode %{
9529     __ cselw(as_Register($dst$$reg),
9530              as_Register($src2$$reg),
9531              as_Register($src1$$reg),
9532              (Assembler::Condition)$cmp$$cmpcode);
9533   %}
9534 
9535   ins_pipe(icond_reg_reg);
9536 %}
9537 
9538 // special cases where one arg is zero
9539 
9540 instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
9541   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
9542 
9543   ins_cost(INSN_COST * 2);
9544   format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}
9545 
9546   ins_encode %{
9547     __ cselw(as_Register($dst$$reg),
9548              zr,
9549              as_Register($src$$reg),
9550              (Assembler::Condition)$cmp$$cmpcode);
9551   %}
9552 
9553   ins_pipe(icond_reg);
9554 %}
9555 
9556 instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
9557   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
9558 
9559   ins_cost(INSN_COST * 2);
9560   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}
9561 
9562   ins_encode %{
9563     __ cselw(as_Register($dst$$reg),
9564              zr,
9565              as_Register($src$$reg),
9566              (Assembler::Condition)$cmp$$cmpcode);
9567   %}
9568 
9569   ins_pipe(icond_reg);
9570 %}
9571 
9572 instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
9573   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
9574 
9575   ins_cost(INSN_COST * 2);
9576   format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}
9577 
9578   ins_encode %{
9579     __ cselw(as_Register($dst$$reg),
9580              as_Register($src$$reg),
9581              zr,
9582              (Assembler::Condition)$cmp$$cmpcode);
9583   %}
9584 
9585   ins_pipe(icond_reg);
9586 %}
9587 
9588 instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
9589   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
9590 
9591   ins_cost(INSN_COST * 2);
9592   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}
9593 
9594   ins_encode %{
9595     __ cselw(as_Register($dst$$reg),
9596              as_Register($src$$reg),
9597              zr,
9598              (Assembler::Condition)$cmp$$cmpcode);
9599   %}
9600 
9601   ins_pipe(icond_reg);
9602 %}
9603 
9604 instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
9605 %{
9606   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
9607 
9608   ins_cost(INSN_COST * 3);
9609 
9610   format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
9611   ins_encode %{
9612     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9613     __ fcsels(as_FloatRegister($dst$$reg),
9614               as_FloatRegister($src2$$reg),
9615               as_FloatRegister($src1$$reg),
9616               cond);
9617   %}
9618 
9619   ins_pipe(pipe_class_default);
9620 %}
9621 
9622 instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
9623 %{
9624   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
9625 
9626   ins_cost(INSN_COST * 3);
9627 
9628   format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
9629   ins_encode %{
9630     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9631     __ fcsels(as_FloatRegister($dst$$reg),
9632               as_FloatRegister($src2$$reg),
9633               as_FloatRegister($src1$$reg),
9634               cond);
9635   %}
9636 
9637   ins_pipe(pipe_class_default);
9638 %}
9639 
9640 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
9641 %{
9642   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
9643 
9644   ins_cost(INSN_COST * 3);
9645 
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
9647   ins_encode %{
9648     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9649     __ fcseld(as_FloatRegister($dst$$reg),
9650               as_FloatRegister($src2$$reg),
9651               as_FloatRegister($src1$$reg),
9652               cond);
9653   %}
9654 
9655   ins_pipe(pipe_class_default);
9656 %}
9657 
9658 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
9659 %{
9660   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
9661 
9662   ins_cost(INSN_COST * 3);
9663 
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
9665   ins_encode %{
9666     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9667     __ fcseld(as_FloatRegister($dst$$reg),
9668               as_FloatRegister($src2$$reg),
9669               as_FloatRegister($src1$$reg),
9670               cond);
9671   %}
9672 
9673   ins_pipe(pipe_class_default);
9674 %}
9675 
9676 // ============================================================================
9677 // Arithmetic Instructions
9678 //
9679 
9680 // Integer Addition
9681 
// TODO
// These rules currently employ operations which do not set CR and
// hence are not flagged as killing CR. We would like to isolate the
// cases where we want to set flags from those where we don't; we still
// need to work out how to do that.
9687 
9688 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9689   match(Set dst (AddI src1 src2));
9690 
9691   ins_cost(INSN_COST);
9692   format %{ "addw  $dst, $src1, $src2" %}
9693 
9694   ins_encode %{
9695     __ addw(as_Register($dst$$reg),
9696             as_Register($src1$$reg),
9697             as_Register($src2$$reg));
9698   %}
9699 
9700   ins_pipe(ialu_reg_reg);
9701 %}
9702 
9703 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
9704   match(Set dst (AddI src1 src2));
9705 
9706   ins_cost(INSN_COST);
9707   format %{ "addw $dst, $src1, $src2" %}
9708 
9709   // use opcode to indicate that this is an add not a sub
9710   opcode(0x0);
9711 
9712   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9713 
9714   ins_pipe(ialu_reg_imm);
9715 %}
9716 
9717 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
9718   match(Set dst (AddI (ConvL2I src1) src2));
9719 
9720   ins_cost(INSN_COST);
9721   format %{ "addw $dst, $src1, $src2" %}
9722 
9723   // use opcode to indicate that this is an add not a sub
9724   opcode(0x0);
9725 
9726   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9727 
9728   ins_pipe(ialu_reg_imm);
9729 %}
9730 
9731 // Pointer Addition
9732 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
9733   match(Set dst (AddP src1 src2));
9734 
9735   ins_cost(INSN_COST);
9736   format %{ "add $dst, $src1, $src2\t# ptr" %}
9737 
9738   ins_encode %{
9739     __ add(as_Register($dst$$reg),
9740            as_Register($src1$$reg),
9741            as_Register($src2$$reg));
9742   %}
9743 
9744   ins_pipe(ialu_reg_reg);
9745 %}
9746 
9747 instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
9748   match(Set dst (AddP src1 (ConvI2L src2)));
9749 
9750   ins_cost(1.9 * INSN_COST);
9751   format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
9752 
9753   ins_encode %{
9754     __ add(as_Register($dst$$reg),
9755            as_Register($src1$$reg),
9756            as_Register($src2$$reg), ext::sxtw);
9757   %}
9758 
9759   ins_pipe(ialu_reg_reg);
9760 %}
9761 
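// Pointer addition with a (possibly sign-extended) scaled index folds
// into a single lea, e.g. computing the address of a long[] element as
// add dst, base, index, lsl #3.
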
9762 instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
9763   match(Set dst (AddP src1 (LShiftL src2 scale)));
9764 
9765   ins_cost(1.9 * INSN_COST);
9766   format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
9767 
9768   ins_encode %{
9769     __ lea(as_Register($dst$$reg),
9770            Address(as_Register($src1$$reg), as_Register($src2$$reg),
9771                    Address::lsl($scale$$constant)));
9772   %}
9773 
9774   ins_pipe(ialu_reg_reg_shift);
9775 %}
9776 
9777 instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
9778   match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
9779 
9780   ins_cost(1.9 * INSN_COST);
9781   format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
9782 
9783   ins_encode %{
9784     __ lea(as_Register($dst$$reg),
9785            Address(as_Register($src1$$reg), as_Register($src2$$reg),
9786                    Address::sxtw($scale$$constant)));
9787   %}
9788 
9789   ins_pipe(ialu_reg_reg_shift);
9790 %}
9791 
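// Shift a sign-extended int left into a long: for example,
// (long)(int)src << 3 becomes sbfiz dst, src, #3, #32, sign-extending
// the low 32 bits of src and shifting them into place in a single
// instruction.
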
9792 instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
9793   match(Set dst (LShiftL (ConvI2L src) scale));
9794 
9795   ins_cost(INSN_COST);
9796   format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}
9797 
9798   ins_encode %{
9799     __ sbfiz(as_Register($dst$$reg),
9800           as_Register($src$$reg),
9801           $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
9802   %}
9803 
9804   ins_pipe(ialu_reg_shift);
9805 %}
9806 
9807 // Pointer Immediate Addition
9808 // n.b. this needs to be more expensive than using an indirect memory
9809 // operand
9810 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
9811   match(Set dst (AddP src1 src2));
9812 
9813   ins_cost(INSN_COST);
9814   format %{ "add $dst, $src1, $src2\t# ptr" %}
9815 
9816   // use opcode to indicate that this is an add not a sub
9817   opcode(0x0);
9818 
9819   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
9820 
9821   ins_pipe(ialu_reg_imm);
9822 %}
9823 
9824 // Long Addition
9825 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
9826 
9827   match(Set dst (AddL src1 src2));
9828 
9829   ins_cost(INSN_COST);
9830   format %{ "add  $dst, $src1, $src2" %}
9831 
9832   ins_encode %{
9833     __ add(as_Register($dst$$reg),
9834            as_Register($src1$$reg),
9835            as_Register($src2$$reg));
9836   %}
9837 
9838   ins_pipe(ialu_reg_reg);
9839 %}
9840 
// Long Immediate Addition. No constant pool entries required.
9842 instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
9843   match(Set dst (AddL src1 src2));
9844 
9845   ins_cost(INSN_COST);
9846   format %{ "add $dst, $src1, $src2" %}
9847 
9848   // use opcode to indicate that this is an add not a sub
9849   opcode(0x0);
9850 
9851   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
9852 
9853   ins_pipe(ialu_reg_imm);
9854 %}
9855 
9856 // Integer Subtraction
9857 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9858   match(Set dst (SubI src1 src2));
9859 
9860   ins_cost(INSN_COST);
9861   format %{ "subw  $dst, $src1, $src2" %}
9862 
9863   ins_encode %{
9864     __ subw(as_Register($dst$$reg),
9865             as_Register($src1$$reg),
9866             as_Register($src2$$reg));
9867   %}
9868 
9869   ins_pipe(ialu_reg_reg);
9870 %}
9871 
9872 // Immediate Subtraction
9873 instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
9874   match(Set dst (SubI src1 src2));
9875 
9876   ins_cost(INSN_COST);
9877   format %{ "subw $dst, $src1, $src2" %}
9878 
9879   // use opcode to indicate that this is a sub not an add
9880   opcode(0x1);
9881 
9882   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9883 
9884   ins_pipe(ialu_reg_imm);
9885 %}
9886 
9887 // Long Subtraction
9888 instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
9889 
9890   match(Set dst (SubL src1 src2));
9891 
9892   ins_cost(INSN_COST);
9893   format %{ "sub  $dst, $src1, $src2" %}
9894 
9895   ins_encode %{
9896     __ sub(as_Register($dst$$reg),
9897            as_Register($src1$$reg),
9898            as_Register($src2$$reg));
9899   %}
9900 
9901   ins_pipe(ialu_reg_reg);
9902 %}
9903 
// Long Immediate Subtraction. No constant pool entries required.
9905 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
9906   match(Set dst (SubL src1 src2));
9907 
9908   ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}
9910 
9911   // use opcode to indicate that this is a sub not an add
9912   opcode(0x1);
9913 
9914   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
9915 
9916   ins_pipe(ialu_reg_imm);
9917 %}
9918 
9919 // Integer Negation (special case for sub)
9920 
9921 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
9922   match(Set dst (SubI zero src));
9923 
9924   ins_cost(INSN_COST);
9925   format %{ "negw $dst, $src\t# int" %}
9926 
9927   ins_encode %{
9928     __ negw(as_Register($dst$$reg),
9929             as_Register($src$$reg));
9930   %}
9931 
9932   ins_pipe(ialu_reg);
9933 %}
9934 
9935 // Long Negation
9936 
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
9938   match(Set dst (SubL zero src));
9939 
9940   ins_cost(INSN_COST);
9941   format %{ "neg $dst, $src\t# long" %}
9942 
9943   ins_encode %{
9944     __ neg(as_Register($dst$$reg),
9945            as_Register($src$$reg));
9946   %}
9947 
9948   ins_pipe(ialu_reg);
9949 %}
9950 
9951 // Integer Multiply
9952 
9953 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9954   match(Set dst (MulI src1 src2));
9955 
9956   ins_cost(INSN_COST * 3);
9957   format %{ "mulw  $dst, $src1, $src2" %}
9958 
9959   ins_encode %{
9960     __ mulw(as_Register($dst$$reg),
9961             as_Register($src1$$reg),
9962             as_Register($src2$$reg));
9963   %}
9964 
9965   ins_pipe(imul_reg_reg);
9966 %}
9967 
9968 instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9969   match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));
9970 
9971   ins_cost(INSN_COST * 3);
9972   format %{ "smull  $dst, $src1, $src2" %}
9973 
9974   ins_encode %{
9975     __ smull(as_Register($dst$$reg),
9976              as_Register($src1$$reg),
9977              as_Register($src2$$reg));
9978   %}
9979 
9980   ins_pipe(imul_reg_reg);
9981 %}
9982 
9983 // Long Multiply
9984 
9985 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
9986   match(Set dst (MulL src1 src2));
9987 
9988   ins_cost(INSN_COST * 5);
9989   format %{ "mul  $dst, $src1, $src2" %}
9990 
9991   ins_encode %{
9992     __ mul(as_Register($dst$$reg),
9993            as_Register($src1$$reg),
9994            as_Register($src2$$reg));
9995   %}
9996 
9997   ins_pipe(lmul_reg_reg);
9998 %}
9999 
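// Multiply High -- smulh yields bits 127..64 of the signed 128-bit
// product of src1 and src2.
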
10000 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
10001 %{
10002   match(Set dst (MulHiL src1 src2));
10003 
10004   ins_cost(INSN_COST * 7);
  format %{ "smulh  $dst, $src1, $src2\t# mulhi" %}
10006 
10007   ins_encode %{
10008     __ smulh(as_Register($dst$$reg),
10009              as_Register($src1$$reg),
10010              as_Register($src2$$reg));
10011   %}
10012 
10013   ins_pipe(lmul_reg_reg);
10014 %}
10015 
10016 // Combined Integer Multiply & Add/Sub
10017 
10018 instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10019   match(Set dst (AddI src3 (MulI src1 src2)));
10020 
10021   ins_cost(INSN_COST * 3);
  format %{ "maddw $dst, $src1, $src2, $src3" %}
10023 
10024   ins_encode %{
10025     __ maddw(as_Register($dst$$reg),
10026              as_Register($src1$$reg),
10027              as_Register($src2$$reg),
10028              as_Register($src3$$reg));
10029   %}
10030 
10031   ins_pipe(imac_reg_reg);
10032 %}
10033 
10034 instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10035   match(Set dst (SubI src3 (MulI src1 src2)));
10036 
10037   ins_cost(INSN_COST * 3);
  format %{ "msubw $dst, $src1, $src2, $src3" %}
10039 
10040   ins_encode %{
10041     __ msubw(as_Register($dst$$reg),
10042              as_Register($src1$$reg),
10043              as_Register($src2$$reg),
10044              as_Register($src3$$reg));
10045   %}
10046 
10047   ins_pipe(imac_reg_reg);
10048 %}
10049 
10050 // Combined Long Multiply & Add/Sub
10051 
10052 instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
10053   match(Set dst (AddL src3 (MulL src1 src2)));
10054 
10055   ins_cost(INSN_COST * 5);
10056   format %{ "madd  $dst, $src1, $src2, $src3" %}
10057 
10058   ins_encode %{
10059     __ madd(as_Register($dst$$reg),
10060             as_Register($src1$$reg),
10061             as_Register($src2$$reg),
10062             as_Register($src3$$reg));
10063   %}
10064 
10065   ins_pipe(lmac_reg_reg);
10066 %}
10067 
10068 instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
10069   match(Set dst (SubL src3 (MulL src1 src2)));
10070 
10071   ins_cost(INSN_COST * 5);
10072   format %{ "msub  $dst, $src1, $src2, $src3" %}
10073 
10074   ins_encode %{
10075     __ msub(as_Register($dst$$reg),
10076             as_Register($src1$$reg),
10077             as_Register($src2$$reg),
10078             as_Register($src3$$reg));
10079   %}
10080 
10081   ins_pipe(lmac_reg_reg);
10082 %}
10083 
10084 // Integer Divide
10085 
10086 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10087   match(Set dst (DivI src1 src2));
10088 
10089   ins_cost(INSN_COST * 19);
10090   format %{ "sdivw  $dst, $src1, $src2" %}
10091 
10092   ins_encode(aarch64_enc_divw(dst, src1, src2));
10093   ins_pipe(idiv_reg_reg);
10094 %}
10095 
10096 instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
10097   match(Set dst (URShiftI (RShiftI src1 div1) div2));
10098   ins_cost(INSN_COST);
10099   format %{ "lsrw $dst, $src1, $div1" %}
10100   ins_encode %{
10101     __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
10102   %}
10103   ins_pipe(ialu_reg_shift);
10104 %}
10105 
10106 instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
10107   match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
10108   ins_cost(INSN_COST);
  format %{ "addw $dst, $src, $src, LSR $div1" %}
10110 
10111   ins_encode %{
10112     __ addw(as_Register($dst$$reg),
10113               as_Register($src$$reg),
10114               as_Register($src$$reg),
10115               Assembler::LSR, 31);
10116   %}
10117   ins_pipe(ialu_reg);
10118 %}
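
// The two rules above match the idiom the ideal graph produces for
// signed division by two: (src >> 31) >>> 31 extracts the sign bit
// (0 or 1), and adding it to the dividend before the arithmetic shift
// makes the result round towards zero, e.g. -3/2: (-3 + 1) >> 1 == -1.
// signExtractL/div2RoundL below do the same for longs with a shift
// count of 63.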
10119 
10120 // Long Divide
10121 
10122 instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10123   match(Set dst (DivL src1 src2));
10124 
10125   ins_cost(INSN_COST * 35);
10126   format %{ "sdiv   $dst, $src1, $src2" %}
10127 
10128   ins_encode(aarch64_enc_div(dst, src1, src2));
10129   ins_pipe(ldiv_reg_reg);
10130 %}
10131 
10132 instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
10133   match(Set dst (URShiftL (RShiftL src1 div1) div2));
10134   ins_cost(INSN_COST);
10135   format %{ "lsr $dst, $src1, $div1" %}
10136   ins_encode %{
10137     __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
10138   %}
10139   ins_pipe(ialu_reg_shift);
10140 %}
10141 
10142 instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
10143   match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
10144   ins_cost(INSN_COST);
  format %{ "add $dst, $src, $src, LSR $div1" %}
10146 
10147   ins_encode %{
10148     __ add(as_Register($dst$$reg),
10149               as_Register($src$$reg),
10150               as_Register($src$$reg),
10151               Assembler::LSR, 63);
10152   %}
10153   ins_pipe(ialu_reg);
10154 %}
10155 
10156 // Integer Remainder
10157 
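// AArch64 has no integer remainder instruction, so a % b is expanded
// as a - (a / b) * b: sdivw computes the quotient and msubw multiplies
// it back and subtracts it from the dividend (likewise sdiv/msub for
// longs below).
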
10158 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10159   match(Set dst (ModI src1 src2));
10160 
10161   ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}
10164 
10165   ins_encode(aarch64_enc_modw(dst, src1, src2));
10166   ins_pipe(idiv_reg_reg);
10167 %}
10168 
10169 // Long Remainder
10170 
10171 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10172   match(Set dst (ModL src1 src2));
10173 
10174   ins_cost(INSN_COST * 38);
10175   format %{ "sdiv   rscratch1, $src1, $src2\n\t"
10176             "msub   $dst, rscratch1, $src2, $src1" %}
10177 
10178   ins_encode(aarch64_enc_mod(dst, src1, src2));
10179   ins_pipe(ldiv_reg_reg);
10180 %}
10181 
10182 // Integer Shifts
10183 
10184 // Shift Left Register
10185 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10186   match(Set dst (LShiftI src1 src2));
10187 
10188   ins_cost(INSN_COST * 2);
10189   format %{ "lslvw  $dst, $src1, $src2" %}
10190 
10191   ins_encode %{
10192     __ lslvw(as_Register($dst$$reg),
10193              as_Register($src1$$reg),
10194              as_Register($src2$$reg));
10195   %}
10196 
10197   ins_pipe(ialu_reg_reg_vshift);
10198 %}
10199 
10200 // Shift Left Immediate
10201 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10202   match(Set dst (LShiftI src1 src2));
10203 
10204   ins_cost(INSN_COST);
10205   format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
10206 
10207   ins_encode %{
10208     __ lslw(as_Register($dst$$reg),
10209             as_Register($src1$$reg),
10210             $src2$$constant & 0x1f);
10211   %}
10212 
10213   ins_pipe(ialu_reg_shift);
10214 %}
10215 
10216 // Shift Right Logical Register
10217 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10218   match(Set dst (URShiftI src1 src2));
10219 
10220   ins_cost(INSN_COST * 2);
10221   format %{ "lsrvw  $dst, $src1, $src2" %}
10222 
10223   ins_encode %{
10224     __ lsrvw(as_Register($dst$$reg),
10225              as_Register($src1$$reg),
10226              as_Register($src2$$reg));
10227   %}
10228 
10229   ins_pipe(ialu_reg_reg_vshift);
10230 %}
10231 
10232 // Shift Right Logical Immediate
10233 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10234   match(Set dst (URShiftI src1 src2));
10235 
10236   ins_cost(INSN_COST);
10237   format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
10238 
10239   ins_encode %{
10240     __ lsrw(as_Register($dst$$reg),
10241             as_Register($src1$$reg),
10242             $src2$$constant & 0x1f);
10243   %}
10244 
10245   ins_pipe(ialu_reg_shift);
10246 %}
10247 
10248 // Shift Right Arithmetic Register
10249 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10250   match(Set dst (RShiftI src1 src2));
10251 
10252   ins_cost(INSN_COST * 2);
10253   format %{ "asrvw  $dst, $src1, $src2" %}
10254 
10255   ins_encode %{
10256     __ asrvw(as_Register($dst$$reg),
10257              as_Register($src1$$reg),
10258              as_Register($src2$$reg));
10259   %}
10260 
10261   ins_pipe(ialu_reg_reg_vshift);
10262 %}
10263 
10264 // Shift Right Arithmetic Immediate
10265 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10266   match(Set dst (RShiftI src1 src2));
10267 
10268   ins_cost(INSN_COST);
10269   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
10270 
10271   ins_encode %{
10272     __ asrw(as_Register($dst$$reg),
10273             as_Register($src1$$reg),
10274             $src2$$constant & 0x1f);
10275   %}
10276 
10277   ins_pipe(ialu_reg_shift);
10278 %}
10279 
10280 // Combined Int Mask and Right Shift (using UBFM)
10281 // TODO
10282 
10283 // Long Shifts
10284 
10285 // Shift Left Register
10286 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10287   match(Set dst (LShiftL src1 src2));
10288 
10289   ins_cost(INSN_COST * 2);
10290   format %{ "lslv  $dst, $src1, $src2" %}
10291 
10292   ins_encode %{
10293     __ lslv(as_Register($dst$$reg),
10294             as_Register($src1$$reg),
10295             as_Register($src2$$reg));
10296   %}
10297 
10298   ins_pipe(ialu_reg_reg_vshift);
10299 %}
10300 
10301 // Shift Left Immediate
10302 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10303   match(Set dst (LShiftL src1 src2));
10304 
10305   ins_cost(INSN_COST);
10306   format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
10307 
10308   ins_encode %{
10309     __ lsl(as_Register($dst$$reg),
10310             as_Register($src1$$reg),
10311             $src2$$constant & 0x3f);
10312   %}
10313 
10314   ins_pipe(ialu_reg_shift);
10315 %}
10316 
10317 // Shift Right Logical Register
10318 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10319   match(Set dst (URShiftL src1 src2));
10320 
10321   ins_cost(INSN_COST * 2);
10322   format %{ "lsrv  $dst, $src1, $src2" %}
10323 
10324   ins_encode %{
10325     __ lsrv(as_Register($dst$$reg),
10326             as_Register($src1$$reg),
10327             as_Register($src2$$reg));
10328   %}
10329 
10330   ins_pipe(ialu_reg_reg_vshift);
10331 %}
10332 
10333 // Shift Right Logical Immediate
10334 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10335   match(Set dst (URShiftL src1 src2));
10336 
10337   ins_cost(INSN_COST);
10338   format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}
10339 
10340   ins_encode %{
10341     __ lsr(as_Register($dst$$reg),
10342            as_Register($src1$$reg),
10343            $src2$$constant & 0x3f);
10344   %}
10345 
10346   ins_pipe(ialu_reg_shift);
10347 %}
10348 
10349 // A special-case pattern for card table stores.
10350 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
10351   match(Set dst (URShiftL (CastP2X src1) src2));
10352 
10353   ins_cost(INSN_COST);
10354   format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}
10355 
10356   ins_encode %{
10357     __ lsr(as_Register($dst$$reg),
10358            as_Register($src1$$reg),
10359            $src2$$constant & 0x3f);
10360   %}
10361 
10362   ins_pipe(ialu_reg_shift);
10363 %}
10364 
10365 // Shift Right Arithmetic Register
10366 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10367   match(Set dst (RShiftL src1 src2));
10368 
10369   ins_cost(INSN_COST * 2);
10370   format %{ "asrv  $dst, $src1, $src2" %}
10371 
10372   ins_encode %{
10373     __ asrv(as_Register($dst$$reg),
10374             as_Register($src1$$reg),
10375             as_Register($src2$$reg));
10376   %}
10377 
10378   ins_pipe(ialu_reg_reg_vshift);
10379 %}
10380 
10381 // Shift Right Arithmetic Immediate
10382 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10383   match(Set dst (RShiftL src1 src2));
10384 
10385   ins_cost(INSN_COST);
10386   format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}
10387 
10388   ins_encode %{
10389     __ asr(as_Register($dst$$reg),
10390            as_Register($src1$$reg),
10391            $src2$$constant & 0x3f);
10392   %}
10393 
10394   ins_pipe(ialu_reg_shift);
10395 %}
10396 
10397 // BEGIN This section of the file is automatically generated. Do not edit --------------
10398 
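// These rules fold a bitwise NOT (which C2 models as xor with -1)
// into the combined AArch64 instructions bic, orn and eon, further
// down also with a shifted second operand.  Illustrative examples:
//   a & ~b         ==> bicw dst, a, b
//   a | ~(b >>> 3) ==> ornw dst, a, b, LSR #3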
10399 instruct regL_not_reg(iRegLNoSp dst,
10400                          iRegL src1, immL_M1 m1,
10401                          rFlagsReg cr) %{
10402   match(Set dst (XorL src1 m1));
10403   ins_cost(INSN_COST);
10404   format %{ "eon  $dst, $src1, zr" %}
10405 
10406   ins_encode %{
10407     __ eon(as_Register($dst$$reg),
10408               as_Register($src1$$reg),
10409               zr,
10410               Assembler::LSL, 0);
10411   %}
10412 
10413   ins_pipe(ialu_reg);
10414 %}
10415 instruct regI_not_reg(iRegINoSp dst,
10416                          iRegIorL2I src1, immI_M1 m1,
10417                          rFlagsReg cr) %{
10418   match(Set dst (XorI src1 m1));
10419   ins_cost(INSN_COST);
10420   format %{ "eonw  $dst, $src1, zr" %}
10421 
10422   ins_encode %{
10423     __ eonw(as_Register($dst$$reg),
10424               as_Register($src1$$reg),
10425               zr,
10426               Assembler::LSL, 0);
10427   %}
10428 
10429   ins_pipe(ialu_reg);
10430 %}
10431 
10432 instruct AndI_reg_not_reg(iRegINoSp dst,
10433                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10434                          rFlagsReg cr) %{
10435   match(Set dst (AndI src1 (XorI src2 m1)));
10436   ins_cost(INSN_COST);
10437   format %{ "bicw  $dst, $src1, $src2" %}
10438 
10439   ins_encode %{
10440     __ bicw(as_Register($dst$$reg),
10441               as_Register($src1$$reg),
10442               as_Register($src2$$reg),
10443               Assembler::LSL, 0);
10444   %}
10445 
10446   ins_pipe(ialu_reg_reg);
10447 %}
10448 
10449 instruct AndL_reg_not_reg(iRegLNoSp dst,
10450                          iRegL src1, iRegL src2, immL_M1 m1,
10451                          rFlagsReg cr) %{
10452   match(Set dst (AndL src1 (XorL src2 m1)));
10453   ins_cost(INSN_COST);
10454   format %{ "bic  $dst, $src1, $src2" %}
10455 
10456   ins_encode %{
10457     __ bic(as_Register($dst$$reg),
10458               as_Register($src1$$reg),
10459               as_Register($src2$$reg),
10460               Assembler::LSL, 0);
10461   %}
10462 
10463   ins_pipe(ialu_reg_reg);
10464 %}
10465 
10466 instruct OrI_reg_not_reg(iRegINoSp dst,
10467                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10468                          rFlagsReg cr) %{
10469   match(Set dst (OrI src1 (XorI src2 m1)));
10470   ins_cost(INSN_COST);
10471   format %{ "ornw  $dst, $src1, $src2" %}
10472 
10473   ins_encode %{
10474     __ ornw(as_Register($dst$$reg),
10475               as_Register($src1$$reg),
10476               as_Register($src2$$reg),
10477               Assembler::LSL, 0);
10478   %}
10479 
10480   ins_pipe(ialu_reg_reg);
10481 %}
10482 
10483 instruct OrL_reg_not_reg(iRegLNoSp dst,
10484                          iRegL src1, iRegL src2, immL_M1 m1,
10485                          rFlagsReg cr) %{
10486   match(Set dst (OrL src1 (XorL src2 m1)));
10487   ins_cost(INSN_COST);
10488   format %{ "orn  $dst, $src1, $src2" %}
10489 
10490   ins_encode %{
10491     __ orn(as_Register($dst$$reg),
10492               as_Register($src1$$reg),
10493               as_Register($src2$$reg),
10494               Assembler::LSL, 0);
10495   %}
10496 
10497   ins_pipe(ialu_reg_reg);
10498 %}
10499 
10500 instruct XorI_reg_not_reg(iRegINoSp dst,
10501                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10502                          rFlagsReg cr) %{
10503   match(Set dst (XorI m1 (XorI src2 src1)));
10504   ins_cost(INSN_COST);
10505   format %{ "eonw  $dst, $src1, $src2" %}
10506 
10507   ins_encode %{
10508     __ eonw(as_Register($dst$$reg),
10509               as_Register($src1$$reg),
10510               as_Register($src2$$reg),
10511               Assembler::LSL, 0);
10512   %}
10513 
10514   ins_pipe(ialu_reg_reg);
10515 %}
10516 
10517 instruct XorL_reg_not_reg(iRegLNoSp dst,
10518                          iRegL src1, iRegL src2, immL_M1 m1,
10519                          rFlagsReg cr) %{
10520   match(Set dst (XorL m1 (XorL src2 src1)));
10521   ins_cost(INSN_COST);
10522   format %{ "eon  $dst, $src1, $src2" %}
10523 
10524   ins_encode %{
10525     __ eon(as_Register($dst$$reg),
10526               as_Register($src1$$reg),
10527               as_Register($src2$$reg),
10528               Assembler::LSL, 0);
10529   %}
10530 
10531   ins_pipe(ialu_reg_reg);
10532 %}
10533 
10534 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10535                          iRegIorL2I src1, iRegIorL2I src2,
10536                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10537   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10538   ins_cost(1.9 * INSN_COST);
10539   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10540 
10541   ins_encode %{
10542     __ bicw(as_Register($dst$$reg),
10543               as_Register($src1$$reg),
10544               as_Register($src2$$reg),
10545               Assembler::LSR,
10546               $src3$$constant & 0x1f);
10547   %}
10548 
10549   ins_pipe(ialu_reg_reg_shift);
10550 %}
10551 
10552 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10553                          iRegL src1, iRegL src2,
10554                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10555   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10556   ins_cost(1.9 * INSN_COST);
10557   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10558 
10559   ins_encode %{
10560     __ bic(as_Register($dst$$reg),
10561               as_Register($src1$$reg),
10562               as_Register($src2$$reg),
10563               Assembler::LSR,
10564               $src3$$constant & 0x3f);
10565   %}
10566 
10567   ins_pipe(ialu_reg_reg_shift);
10568 %}
10569 
10570 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10571                          iRegIorL2I src1, iRegIorL2I src2,
10572                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10573   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10574   ins_cost(1.9 * INSN_COST);
10575   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10576 
10577   ins_encode %{
10578     __ bicw(as_Register($dst$$reg),
10579               as_Register($src1$$reg),
10580               as_Register($src2$$reg),
10581               Assembler::ASR,
10582               $src3$$constant & 0x1f);
10583   %}
10584 
10585   ins_pipe(ialu_reg_reg_shift);
10586 %}
10587 
10588 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10589                          iRegL src1, iRegL src2,
10590                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10591   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10592   ins_cost(1.9 * INSN_COST);
10593   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10594 
10595   ins_encode %{
10596     __ bic(as_Register($dst$$reg),
10597               as_Register($src1$$reg),
10598               as_Register($src2$$reg),
10599               Assembler::ASR,
10600               $src3$$constant & 0x3f);
10601   %}
10602 
10603   ins_pipe(ialu_reg_reg_shift);
10604 %}
10605 
10606 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10607                          iRegIorL2I src1, iRegIorL2I src2,
10608                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10609   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10610   ins_cost(1.9 * INSN_COST);
10611   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10612 
10613   ins_encode %{
10614     __ bicw(as_Register($dst$$reg),
10615               as_Register($src1$$reg),
10616               as_Register($src2$$reg),
10617               Assembler::LSL,
10618               $src3$$constant & 0x1f);
10619   %}
10620 
10621   ins_pipe(ialu_reg_reg_shift);
10622 %}
10623 
10624 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10625                          iRegL src1, iRegL src2,
10626                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10627   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10628   ins_cost(1.9 * INSN_COST);
10629   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10630 
10631   ins_encode %{
10632     __ bic(as_Register($dst$$reg),
10633               as_Register($src1$$reg),
10634               as_Register($src2$$reg),
10635               Assembler::LSL,
10636               $src3$$constant & 0x3f);
10637   %}
10638 
10639   ins_pipe(ialu_reg_reg_shift);
10640 %}
10641 
10642 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10643                          iRegIorL2I src1, iRegIorL2I src2,
10644                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10645   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10646   ins_cost(1.9 * INSN_COST);
10647   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10648 
10649   ins_encode %{
10650     __ eonw(as_Register($dst$$reg),
10651               as_Register($src1$$reg),
10652               as_Register($src2$$reg),
10653               Assembler::LSR,
10654               $src3$$constant & 0x1f);
10655   %}
10656 
10657   ins_pipe(ialu_reg_reg_shift);
10658 %}
10659 
10660 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10661                          iRegL src1, iRegL src2,
10662                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10663   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10664   ins_cost(1.9 * INSN_COST);
10665   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10666 
10667   ins_encode %{
10668     __ eon(as_Register($dst$$reg),
10669               as_Register($src1$$reg),
10670               as_Register($src2$$reg),
10671               Assembler::LSR,
10672               $src3$$constant & 0x3f);
10673   %}
10674 
10675   ins_pipe(ialu_reg_reg_shift);
10676 %}
10677 
10678 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
10679                          iRegIorL2I src1, iRegIorL2I src2,
10680                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10681   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
10682   ins_cost(1.9 * INSN_COST);
10683   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
10684 
10685   ins_encode %{
10686     __ eonw(as_Register($dst$$reg),
10687               as_Register($src1$$reg),
10688               as_Register($src2$$reg),
10689               Assembler::ASR,
10690               $src3$$constant & 0x1f);
10691   %}
10692 
10693   ins_pipe(ialu_reg_reg_shift);
10694 %}
10695 
10696 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
10697                          iRegL src1, iRegL src2,
10698                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10699   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
10700   ins_cost(1.9 * INSN_COST);
10701   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
10702 
10703   ins_encode %{
10704     __ eon(as_Register($dst$$reg),
10705               as_Register($src1$$reg),
10706               as_Register($src2$$reg),
10707               Assembler::ASR,
10708               $src3$$constant & 0x3f);
10709   %}
10710 
10711   ins_pipe(ialu_reg_reg_shift);
10712 %}
10713 
10714 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
10715                          iRegIorL2I src1, iRegIorL2I src2,
10716                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10717   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
10718   ins_cost(1.9 * INSN_COST);
10719   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
10720 
10721   ins_encode %{
10722     __ eonw(as_Register($dst$$reg),
10723               as_Register($src1$$reg),
10724               as_Register($src2$$reg),
10725               Assembler::LSL,
10726               $src3$$constant & 0x1f);
10727   %}
10728 
10729   ins_pipe(ialu_reg_reg_shift);
10730 %}
10731 
10732 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
10733                          iRegL src1, iRegL src2,
10734                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10735   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
10736   ins_cost(1.9 * INSN_COST);
10737   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
10738 
10739   ins_encode %{
10740     __ eon(as_Register($dst$$reg),
10741               as_Register($src1$$reg),
10742               as_Register($src2$$reg),
10743               Assembler::LSL,
10744               $src3$$constant & 0x3f);
10745   %}
10746 
10747   ins_pipe(ialu_reg_reg_shift);
10748 %}
10749 
10750 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
10751                          iRegIorL2I src1, iRegIorL2I src2,
10752                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10753   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
10754   ins_cost(1.9 * INSN_COST);
10755   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
10756 
10757   ins_encode %{
10758     __ ornw(as_Register($dst$$reg),
10759               as_Register($src1$$reg),
10760               as_Register($src2$$reg),
10761               Assembler::LSR,
10762               $src3$$constant & 0x1f);
10763   %}
10764 
10765   ins_pipe(ialu_reg_reg_shift);
10766 %}
10767 
10768 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
10769                          iRegL src1, iRegL src2,
10770                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10771   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
10772   ins_cost(1.9 * INSN_COST);
10773   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
10774 
10775   ins_encode %{
10776     __ orn(as_Register($dst$$reg),
10777               as_Register($src1$$reg),
10778               as_Register($src2$$reg),
10779               Assembler::LSR,
10780               $src3$$constant & 0x3f);
10781   %}
10782 
10783   ins_pipe(ialu_reg_reg_shift);
10784 %}
10785 
10786 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10787                          iRegIorL2I src1, iRegIorL2I src2,
10788                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10789   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10790   ins_cost(1.9 * INSN_COST);
10791   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10792 
10793   ins_encode %{
10794     __ ornw(as_Register($dst$$reg),
10795               as_Register($src1$$reg),
10796               as_Register($src2$$reg),
10797               Assembler::ASR,
10798               $src3$$constant & 0x1f);
10799   %}
10800 
10801   ins_pipe(ialu_reg_reg_shift);
10802 %}
10803 
10804 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10805                          iRegL src1, iRegL src2,
10806                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10807   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10808   ins_cost(1.9 * INSN_COST);
10809   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10810 
10811   ins_encode %{
10812     __ orn(as_Register($dst$$reg),
10813               as_Register($src1$$reg),
10814               as_Register($src2$$reg),
10815               Assembler::ASR,
10816               $src3$$constant & 0x3f);
10817   %}
10818 
10819   ins_pipe(ialu_reg_reg_shift);
10820 %}
10821 
10822 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10823                          iRegIorL2I src1, iRegIorL2I src2,
10824                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10825   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10826   ins_cost(1.9 * INSN_COST);
10827   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10828 
10829   ins_encode %{
10830     __ ornw(as_Register($dst$$reg),
10831               as_Register($src1$$reg),
10832               as_Register($src2$$reg),
10833               Assembler::LSL,
10834               $src3$$constant & 0x1f);
10835   %}
10836 
10837   ins_pipe(ialu_reg_reg_shift);
10838 %}
10839 
10840 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10841                          iRegL src1, iRegL src2,
10842                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10843   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10844   ins_cost(1.9 * INSN_COST);
10845   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10846 
10847   ins_encode %{
10848     __ orn(as_Register($dst$$reg),
10849               as_Register($src1$$reg),
10850               as_Register($src2$$reg),
10851               Assembler::LSL,
10852               $src3$$constant & 0x3f);
10853   %}
10854 
10855   ins_pipe(ialu_reg_reg_shift);
10856 %}
10857 
10858 instruct AndI_reg_URShift_reg(iRegINoSp dst,
10859                          iRegIorL2I src1, iRegIorL2I src2,
10860                          immI src3, rFlagsReg cr) %{
10861   match(Set dst (AndI src1 (URShiftI src2 src3)));
10862 
10863   ins_cost(1.9 * INSN_COST);
10864   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
10865 
10866   ins_encode %{
10867     __ andw(as_Register($dst$$reg),
10868               as_Register($src1$$reg),
10869               as_Register($src2$$reg),
10870               Assembler::LSR,
10871               $src3$$constant & 0x1f);
10872   %}
10873 
10874   ins_pipe(ialu_reg_reg_shift);
10875 %}
10876 
10877 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
10878                          iRegL src1, iRegL src2,
10879                          immI src3, rFlagsReg cr) %{
10880   match(Set dst (AndL src1 (URShiftL src2 src3)));
10881 
10882   ins_cost(1.9 * INSN_COST);
10883   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
10884 
10885   ins_encode %{
10886     __ andr(as_Register($dst$$reg),
10887               as_Register($src1$$reg),
10888               as_Register($src2$$reg),
10889               Assembler::LSR,
10890               $src3$$constant & 0x3f);
10891   %}
10892 
10893   ins_pipe(ialu_reg_reg_shift);
10894 %}
10895 
10896 instruct AndI_reg_RShift_reg(iRegINoSp dst,
10897                          iRegIorL2I src1, iRegIorL2I src2,
10898                          immI src3, rFlagsReg cr) %{
10899   match(Set dst (AndI src1 (RShiftI src2 src3)));
10900 
10901   ins_cost(1.9 * INSN_COST);
10902   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
10903 
10904   ins_encode %{
10905     __ andw(as_Register($dst$$reg),
10906               as_Register($src1$$reg),
10907               as_Register($src2$$reg),
10908               Assembler::ASR,
10909               $src3$$constant & 0x1f);
10910   %}
10911 
10912   ins_pipe(ialu_reg_reg_shift);
10913 %}
10914 
10915 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
10916                          iRegL src1, iRegL src2,
10917                          immI src3, rFlagsReg cr) %{
10918   match(Set dst (AndL src1 (RShiftL src2 src3)));
10919 
10920   ins_cost(1.9 * INSN_COST);
10921   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
10922 
10923   ins_encode %{
10924     __ andr(as_Register($dst$$reg),
10925               as_Register($src1$$reg),
10926               as_Register($src2$$reg),
10927               Assembler::ASR,
10928               $src3$$constant & 0x3f);
10929   %}
10930 
10931   ins_pipe(ialu_reg_reg_shift);
10932 %}
10933 
10934 instruct AndI_reg_LShift_reg(iRegINoSp dst,
10935                          iRegIorL2I src1, iRegIorL2I src2,
10936                          immI src3, rFlagsReg cr) %{
10937   match(Set dst (AndI src1 (LShiftI src2 src3)));
10938 
10939   ins_cost(1.9 * INSN_COST);
10940   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
10941 
10942   ins_encode %{
10943     __ andw(as_Register($dst$$reg),
10944               as_Register($src1$$reg),
10945               as_Register($src2$$reg),
10946               Assembler::LSL,
10947               $src3$$constant & 0x1f);
10948   %}
10949 
10950   ins_pipe(ialu_reg_reg_shift);
10951 %}
10952 
10953 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
10954                          iRegL src1, iRegL src2,
10955                          immI src3, rFlagsReg cr) %{
10956   match(Set dst (AndL src1 (LShiftL src2 src3)));
10957 
10958   ins_cost(1.9 * INSN_COST);
10959   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
10960 
10961   ins_encode %{
10962     __ andr(as_Register($dst$$reg),
10963               as_Register($src1$$reg),
10964               as_Register($src2$$reg),
10965               Assembler::LSL,
10966               $src3$$constant & 0x3f);
10967   %}
10968 
10969   ins_pipe(ialu_reg_reg_shift);
10970 %}
10971 
10972 instruct XorI_reg_URShift_reg(iRegINoSp dst,
10973                          iRegIorL2I src1, iRegIorL2I src2,
10974                          immI src3, rFlagsReg cr) %{
10975   match(Set dst (XorI src1 (URShiftI src2 src3)));
10976 
10977   ins_cost(1.9 * INSN_COST);
10978   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
10979 
10980   ins_encode %{
10981     __ eorw(as_Register($dst$$reg),
10982               as_Register($src1$$reg),
10983               as_Register($src2$$reg),
10984               Assembler::LSR,
10985               $src3$$constant & 0x1f);
10986   %}
10987 
10988   ins_pipe(ialu_reg_reg_shift);
10989 %}
10990 
10991 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
10992                          iRegL src1, iRegL src2,
10993                          immI src3, rFlagsReg cr) %{
10994   match(Set dst (XorL src1 (URShiftL src2 src3)));
10995 
10996   ins_cost(1.9 * INSN_COST);
10997   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
10998 
10999   ins_encode %{
11000     __ eor(as_Register($dst$$reg),
11001               as_Register($src1$$reg),
11002               as_Register($src2$$reg),
11003               Assembler::LSR,
11004               $src3$$constant & 0x3f);
11005   %}
11006 
11007   ins_pipe(ialu_reg_reg_shift);
11008 %}
11009 
11010 instruct XorI_reg_RShift_reg(iRegINoSp dst,
11011                          iRegIorL2I src1, iRegIorL2I src2,
11012                          immI src3, rFlagsReg cr) %{
11013   match(Set dst (XorI src1 (RShiftI src2 src3)));
11014 
11015   ins_cost(1.9 * INSN_COST);
11016   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
11017 
11018   ins_encode %{
11019     __ eorw(as_Register($dst$$reg),
11020               as_Register($src1$$reg),
11021               as_Register($src2$$reg),
11022               Assembler::ASR,
11023               $src3$$constant & 0x1f);
11024   %}
11025 
11026   ins_pipe(ialu_reg_reg_shift);
11027 %}
11028 
11029 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
11030                          iRegL src1, iRegL src2,
11031                          immI src3, rFlagsReg cr) %{
11032   match(Set dst (XorL src1 (RShiftL src2 src3)));
11033 
11034   ins_cost(1.9 * INSN_COST);
11035   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
11036 
11037   ins_encode %{
11038     __ eor(as_Register($dst$$reg),
11039               as_Register($src1$$reg),
11040               as_Register($src2$$reg),
11041               Assembler::ASR,
11042               $src3$$constant & 0x3f);
11043   %}
11044 
11045   ins_pipe(ialu_reg_reg_shift);
11046 %}
11047 
11048 instruct XorI_reg_LShift_reg(iRegINoSp dst,
11049                          iRegIorL2I src1, iRegIorL2I src2,
11050                          immI src3, rFlagsReg cr) %{
11051   match(Set dst (XorI src1 (LShiftI src2 src3)));
11052 
11053   ins_cost(1.9 * INSN_COST);
11054   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
11055 
11056   ins_encode %{
11057     __ eorw(as_Register($dst$$reg),
11058               as_Register($src1$$reg),
11059               as_Register($src2$$reg),
11060               Assembler::LSL,
11061               $src3$$constant & 0x1f);
11062   %}
11063 
11064   ins_pipe(ialu_reg_reg_shift);
11065 %}
11066 
11067 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11068                          iRegL src1, iRegL src2,
11069                          immI src3, rFlagsReg cr) %{
11070   match(Set dst (XorL src1 (LShiftL src2 src3)));
11071 
11072   ins_cost(1.9 * INSN_COST);
11073   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11074 
11075   ins_encode %{
11076     __ eor(as_Register($dst$$reg),
11077               as_Register($src1$$reg),
11078               as_Register($src2$$reg),
11079               Assembler::LSL,
11080               $src3$$constant & 0x3f);
11081   %}
11082 
11083   ins_pipe(ialu_reg_reg_shift);
11084 %}
11085 
11086 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11087                          iRegIorL2I src1, iRegIorL2I src2,
11088                          immI src3, rFlagsReg cr) %{
11089   match(Set dst (OrI src1 (URShiftI src2 src3)));
11090 
11091   ins_cost(1.9 * INSN_COST);
11092   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11093 
11094   ins_encode %{
11095     __ orrw(as_Register($dst$$reg),
11096               as_Register($src1$$reg),
11097               as_Register($src2$$reg),
11098               Assembler::LSR,
11099               $src3$$constant & 0x1f);
11100   %}
11101 
11102   ins_pipe(ialu_reg_reg_shift);
11103 %}
11104 
11105 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11106                          iRegL src1, iRegL src2,
11107                          immI src3, rFlagsReg cr) %{
11108   match(Set dst (OrL src1 (URShiftL src2 src3)));
11109 
11110   ins_cost(1.9 * INSN_COST);
11111   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11112 
11113   ins_encode %{
11114     __ orr(as_Register($dst$$reg),
11115               as_Register($src1$$reg),
11116               as_Register($src2$$reg),
11117               Assembler::LSR,
11118               $src3$$constant & 0x3f);
11119   %}
11120 
11121   ins_pipe(ialu_reg_reg_shift);
11122 %}
11123 
11124 instruct OrI_reg_RShift_reg(iRegINoSp dst,
11125                          iRegIorL2I src1, iRegIorL2I src2,
11126                          immI src3, rFlagsReg cr) %{
11127   match(Set dst (OrI src1 (RShiftI src2 src3)));
11128 
11129   ins_cost(1.9 * INSN_COST);
11130   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
11131 
11132   ins_encode %{
11133     __ orrw(as_Register($dst$$reg),
11134               as_Register($src1$$reg),
11135               as_Register($src2$$reg),
11136               Assembler::ASR,
11137               $src3$$constant & 0x1f);
11138   %}
11139 
11140   ins_pipe(ialu_reg_reg_shift);
11141 %}
11142 
11143 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11144                          iRegL src1, iRegL src2,
11145                          immI src3, rFlagsReg cr) %{
11146   match(Set dst (OrL src1 (RShiftL src2 src3)));
11147 
11148   ins_cost(1.9 * INSN_COST);
11149   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11150 
11151   ins_encode %{
11152     __ orr(as_Register($dst$$reg),
11153               as_Register($src1$$reg),
11154               as_Register($src2$$reg),
11155               Assembler::ASR,
11156               $src3$$constant & 0x3f);
11157   %}
11158 
11159   ins_pipe(ialu_reg_reg_shift);
11160 %}
11161 
11162 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11163                          iRegIorL2I src1, iRegIorL2I src2,
11164                          immI src3, rFlagsReg cr) %{
11165   match(Set dst (OrI src1 (LShiftI src2 src3)));
11166 
11167   ins_cost(1.9 * INSN_COST);
11168   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11169 
11170   ins_encode %{
11171     __ orrw(as_Register($dst$$reg),
11172               as_Register($src1$$reg),
11173               as_Register($src2$$reg),
11174               Assembler::LSL,
11175               $src3$$constant & 0x1f);
11176   %}
11177 
11178   ins_pipe(ialu_reg_reg_shift);
11179 %}
11180 
11181 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11182                          iRegL src1, iRegL src2,
11183                          immI src3, rFlagsReg cr) %{
11184   match(Set dst (OrL src1 (LShiftL src2 src3)));
11185 
11186   ins_cost(1.9 * INSN_COST);
11187   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11188 
11189   ins_encode %{
11190     __ orr(as_Register($dst$$reg),
11191               as_Register($src1$$reg),
11192               as_Register($src2$$reg),
11193               Assembler::LSL,
11194               $src3$$constant & 0x3f);
11195   %}
11196 
11197   ins_pipe(ialu_reg_reg_shift);
11198 %}
11199 
11200 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11201                          iRegIorL2I src1, iRegIorL2I src2,
11202                          immI src3, rFlagsReg cr) %{
11203   match(Set dst (AddI src1 (URShiftI src2 src3)));
11204 
11205   ins_cost(1.9 * INSN_COST);
11206   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11207 
11208   ins_encode %{
11209     __ addw(as_Register($dst$$reg),
11210               as_Register($src1$$reg),
11211               as_Register($src2$$reg),
11212               Assembler::LSR,
11213               $src3$$constant & 0x1f);
11214   %}
11215 
11216   ins_pipe(ialu_reg_reg_shift);
11217 %}
11218 
11219 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11220                          iRegL src1, iRegL src2,
11221                          immI src3, rFlagsReg cr) %{
11222   match(Set dst (AddL src1 (URShiftL src2 src3)));
11223 
11224   ins_cost(1.9 * INSN_COST);
11225   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11226 
11227   ins_encode %{
11228     __ add(as_Register($dst$$reg),
11229               as_Register($src1$$reg),
11230               as_Register($src2$$reg),
11231               Assembler::LSR,
11232               $src3$$constant & 0x3f);
11233   %}
11234 
11235   ins_pipe(ialu_reg_reg_shift);
11236 %}
11237 
11238 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11239                          iRegIorL2I src1, iRegIorL2I src2,
11240                          immI src3, rFlagsReg cr) %{
11241   match(Set dst (AddI src1 (RShiftI src2 src3)));
11242 
11243   ins_cost(1.9 * INSN_COST);
11244   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11245 
11246   ins_encode %{
11247     __ addw(as_Register($dst$$reg),
11248               as_Register($src1$$reg),
11249               as_Register($src2$$reg),
11250               Assembler::ASR,
11251               $src3$$constant & 0x1f);
11252   %}
11253 
11254   ins_pipe(ialu_reg_reg_shift);
11255 %}
11256 
11257 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
11258                          iRegL src1, iRegL src2,
11259                          immI src3, rFlagsReg cr) %{
11260   match(Set dst (AddL src1 (RShiftL src2 src3)));
11261 
11262   ins_cost(1.9 * INSN_COST);
11263   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
11264 
11265   ins_encode %{
11266     __ add(as_Register($dst$$reg),
11267               as_Register($src1$$reg),
11268               as_Register($src2$$reg),
11269               Assembler::ASR,
11270               $src3$$constant & 0x3f);
11271   %}
11272 
11273   ins_pipe(ialu_reg_reg_shift);
11274 %}
11275 
11276 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11277                          iRegIorL2I src1, iRegIorL2I src2,
11278                          immI src3, rFlagsReg cr) %{
11279   match(Set dst (AddI src1 (LShiftI src2 src3)));
11280 
11281   ins_cost(1.9 * INSN_COST);
11282   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11283 
11284   ins_encode %{
11285     __ addw(as_Register($dst$$reg),
11286               as_Register($src1$$reg),
11287               as_Register($src2$$reg),
11288               Assembler::LSL,
11289               $src3$$constant & 0x1f);
11290   %}
11291 
11292   ins_pipe(ialu_reg_reg_shift);
11293 %}
11294 
11295 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11296                          iRegL src1, iRegL src2,
11297                          immI src3, rFlagsReg cr) %{
11298   match(Set dst (AddL src1 (LShiftL src2 src3)));
11299 
11300   ins_cost(1.9 * INSN_COST);
11301   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11302 
11303   ins_encode %{
11304     __ add(as_Register($dst$$reg),
11305               as_Register($src1$$reg),
11306               as_Register($src2$$reg),
11307               Assembler::LSL,
11308               $src3$$constant & 0x3f);
11309   %}
11310 
11311   ins_pipe(ialu_reg_reg_shift);
11312 %}
11313 
11314 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11315                          iRegIorL2I src1, iRegIorL2I src2,
11316                          immI src3, rFlagsReg cr) %{
11317   match(Set dst (SubI src1 (URShiftI src2 src3)));
11318 
11319   ins_cost(1.9 * INSN_COST);
11320   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11321 
11322   ins_encode %{
11323     __ subw(as_Register($dst$$reg),
11324               as_Register($src1$$reg),
11325               as_Register($src2$$reg),
11326               Assembler::LSR,
11327               $src3$$constant & 0x1f);
11328   %}
11329 
11330   ins_pipe(ialu_reg_reg_shift);
11331 %}
11332 
11333 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11334                          iRegL src1, iRegL src2,
11335                          immI src3, rFlagsReg cr) %{
11336   match(Set dst (SubL src1 (URShiftL src2 src3)));
11337 
11338   ins_cost(1.9 * INSN_COST);
11339   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11340 
11341   ins_encode %{
11342     __ sub(as_Register($dst$$reg),
11343               as_Register($src1$$reg),
11344               as_Register($src2$$reg),
11345               Assembler::LSR,
11346               $src3$$constant & 0x3f);
11347   %}
11348 
11349   ins_pipe(ialu_reg_reg_shift);
11350 %}
11351 
11352 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11353                          iRegIorL2I src1, iRegIorL2I src2,
11354                          immI src3, rFlagsReg cr) %{
11355   match(Set dst (SubI src1 (RShiftI src2 src3)));
11356 
11357   ins_cost(1.9 * INSN_COST);
11358   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11359 
11360   ins_encode %{
11361     __ subw(as_Register($dst$$reg),
11362               as_Register($src1$$reg),
11363               as_Register($src2$$reg),
11364               Assembler::ASR,
11365               $src3$$constant & 0x1f);
11366   %}
11367 
11368   ins_pipe(ialu_reg_reg_shift);
11369 %}
11370 
11371 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11372                          iRegL src1, iRegL src2,
11373                          immI src3, rFlagsReg cr) %{
11374   match(Set dst (SubL src1 (RShiftL src2 src3)));
11375 
11376   ins_cost(1.9 * INSN_COST);
11377   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11378 
11379   ins_encode %{
11380     __ sub(as_Register($dst$$reg),
11381               as_Register($src1$$reg),
11382               as_Register($src2$$reg),
11383               Assembler::ASR,
11384               $src3$$constant & 0x3f);
11385   %}
11386 
11387   ins_pipe(ialu_reg_reg_shift);
11388 %}
11389 
11390 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11391                          iRegIorL2I src1, iRegIorL2I src2,
11392                          immI src3, rFlagsReg cr) %{
11393   match(Set dst (SubI src1 (LShiftI src2 src3)));
11394 
11395   ins_cost(1.9 * INSN_COST);
11396   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11397 
11398   ins_encode %{
11399     __ subw(as_Register($dst$$reg),
11400               as_Register($src1$$reg),
11401               as_Register($src2$$reg),
11402               Assembler::LSL,
11403               $src3$$constant & 0x1f);
11404   %}
11405 
11406   ins_pipe(ialu_reg_reg_shift);
11407 %}
11408 
11409 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11410                          iRegL src1, iRegL src2,
11411                          immI src3, rFlagsReg cr) %{
11412   match(Set dst (SubL src1 (LShiftL src2 src3)));
11413 
11414   ins_cost(1.9 * INSN_COST);
11415   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11416 
11417   ins_encode %{
11418     __ sub(as_Register($dst$$reg),
11419               as_Register($src1$$reg),
11420               as_Register($src2$$reg),
11421               Assembler::LSL,
11422               $src3$$constant & 0x3f);
11423   %}
11424 
11425   ins_pipe(ialu_reg_reg_shift);
11426 %}
11427 
11428 
11429 
11430 // Shift Left followed by Shift Right.
11431 // This idiom is used by the compiler for the i2b bytecode etc.
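// Worked example (illustrative): for (x << 56) >> 56, lshift == 56
// and rshift == 56, so s == 63 - 56 == 7 and r == 0, and we emit
// "sbfm dst, src, #0, #7", i.e. sign-extend the low byte.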
11432 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11433 %{
11434   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
11435   // Make sure we are not going to exceed what sbfm can do.
11436   predicate((unsigned int)n->in(2)->get_int() <= 63
11437             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11438 
11439   ins_cost(INSN_COST * 2);
11440   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11441   ins_encode %{
11442     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11443     int s = 63 - lshift;
11444     int r = (rshift - lshift) & 63;
11445     __ sbfm(as_Register($dst$$reg),
11446             as_Register($src$$reg),
11447             r, s);
11448   %}
11449 
11450   ins_pipe(ialu_reg_shift);
11451 %}
11452 
11453 // Shift Left followed by Shift Right.
11454 // This idiom is used by the compiler for the i2b bytecode etc.
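// Worked example (illustrative): i2b is (x << 24) >> 24, so
// s == 31 - 24 == 7 and r == 0, and we emit "sbfmw dst, src, #0, #7",
// the 32-bit sign-extend-byte form.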
11455 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11456 %{
11457   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11458   // Make sure we are not going to exceed what sbfmw can do.
11459   predicate((unsigned int)n->in(2)->get_int() <= 31
11460             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11461 
11462   ins_cost(INSN_COST * 2);
11463   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11464   ins_encode %{
11465     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11466     int s = 31 - lshift;
11467     int r = (rshift - lshift) & 31;
11468     __ sbfmw(as_Register($dst$$reg),
11469             as_Register($src$$reg),
11470             r, s);
11471   %}
11472 
11473   ins_pipe(ialu_reg_shift);
11474 %}
11475 
11476 // Shift Left followed by Shift Right.
11477 // This idiom is used by the compiler for the i2b bytecode etc.
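// Worked example (illustrative): (x << 32) >>> 32 gives s == 31 and
// r == 0, i.e. "ubfm dst, src, #0, #31", zero-extending the low
// 32 bits.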
11478 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11479 %{
11480   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11481   // Make sure we are not going to exceed what ubfm can do.
11482   predicate((unsigned int)n->in(2)->get_int() <= 63
11483             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11484 
11485   ins_cost(INSN_COST * 2);
11486   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11487   ins_encode %{
11488     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11489     int s = 63 - lshift;
11490     int r = (rshift - lshift) & 63;
11491     __ ubfm(as_Register($dst$$reg),
11492             as_Register($src$$reg),
11493             r, s);
11494   %}
11495 
11496   ins_pipe(ialu_reg_shift);
11497 %}
11498 
11499 // Shift Left followed by Shift Right.
11500 // This idiom is used by the compiler for the i2b bytecode etc.
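// Worked example (illustrative): (x << 16) >>> 16 gives s == 15 and
// r == 0, so "ubfmw dst, src, #0, #15" zero-extends the low halfword.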
11501 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11502 %{
11503   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11504   // Make sure we are not going to exceed what ubfmw can do.
11505   predicate((unsigned int)n->in(2)->get_int() <= 31
11506             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11507 
11508   ins_cost(INSN_COST * 2);
11509   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11510   ins_encode %{
11511     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11512     int s = 31 - lshift;
11513     int r = (rshift - lshift) & 31;
11514     __ ubfmw(as_Register($dst$$reg),
11515             as_Register($src$$reg),
11516             r, s);
11517   %}
11518 
11519   ins_pipe(ialu_reg_shift);
11520 %}
11521 // Bitfield extract with shift & mask
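//
// The mask must be 2^width - 1, so the field width is
// exact_log2(mask + 1).  Worked example (illustrative):
// (x >>> 8) & 0xff has rshift == 8 and mask == 0xff, so width == 8
// and we emit "ubfxw dst, src, #8, #8".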
11522 
11523 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11524 %{
11525   match(Set dst (AndI (URShiftI src rshift) mask));
11526 
11527   ins_cost(INSN_COST);
11528   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
11529   ins_encode %{
11530     int rshift = $rshift$$constant;
11531     long mask = $mask$$constant;
11532     int width = exact_log2(mask+1);
11533     __ ubfxw(as_Register($dst$$reg),
11534             as_Register($src$$reg), rshift, width);
11535   %}
11536   ins_pipe(ialu_reg_shift);
11537 %}
11538 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
11539 %{
11540   match(Set dst (AndL (URShiftL src rshift) mask));
11541 
11542   ins_cost(INSN_COST);
11543   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11544   ins_encode %{
11545     int rshift = $rshift$$constant;
11546     long mask = $mask$$constant;
11547     int width = exact_log2(mask+1);
11548     __ ubfx(as_Register($dst$$reg),
11549             as_Register($src$$reg), rshift, width);
11550   %}
11551   ins_pipe(ialu_reg_shift);
11552 %}
11553 
11554 // We can use ubfx when zero-extending the result of an And with a
11555 // mask, provided the mask is positive; immI_bitmask guarantees that.
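// Worked example (illustrative): (long)((x >>> 4) & 0xfff) extracts a
// 12-bit field; a single "ubfx dst, src, #4, #12" also supplies the
// zero-extension that ConvI2L requires, since the high bits are
// already zero.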
11556 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11557 %{
11558   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11559 
11560   ins_cost(INSN_COST * 2);
11561   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11562   ins_encode %{
11563     int rshift = $rshift$$constant;
11564     long mask = $mask$$constant;
11565     int width = exact_log2(mask+1);
11566     __ ubfx(as_Register($dst$$reg),
11567             as_Register($src$$reg), rshift, width);
11568   %}
11569   ins_pipe(ialu_reg_shift);
11570 %}
11571 
11572 // Rotations
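//
// extr takes 64 bits from the concatenation src1:src2 starting at bit
// #rshift of src2, which equals (src1 << lshift) | (src2 >>> rshift)
// whenever lshift + rshift == 64; the predicates below enforce that.
// When src1 == src2 this is a rotate right.  Illustrative example:
// (x << 40) | (y >>> 24) ==> "extr dst, x, y, #24".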
11573 
11574 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11575 %{
11576   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11577   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11578 
11579   ins_cost(INSN_COST);
11580   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11581 
11582   ins_encode %{
11583     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11584             $rshift$$constant & 63);
11585   %}
11586   ins_pipe(ialu_reg_reg_extr);
11587 %}
11588 
11589 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11590 %{
11591   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11592   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11593 
11594   ins_cost(INSN_COST);
11595   format %{ "extrw $dst, $src1, $src2, #$rshift" %}
11596 
11597   ins_encode %{
11598     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11599             $rshift$$constant & 31);
11600   %}
11601   ins_pipe(ialu_reg_reg_extr);
11602 %}
11603 
11604 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11605 %{
11606   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11607   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11608 
11609   ins_cost(INSN_COST);
11610   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11611 
11612   ins_encode %{
11613     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11614             $rshift$$constant & 63);
11615   %}
11616   ins_pipe(ialu_reg_reg_extr);
11617 %}
11618 
11619 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11620 %{
11621   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11622   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11623 
11624   ins_cost(INSN_COST);
11625   format %{ "extrw $dst, $src1, $src2, #$rshift" %}
11626 
11627   ins_encode %{
11628     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11629             $rshift$$constant & 31);
11630   %}
11631   ins_pipe(ialu_reg_reg_extr);
11632 %}
11633 
11634 
11635 // rol expander
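//
// AArch64 has no rotate-left instruction, so rol is implemented as a
// rotate-right by the negated count: rol(x, s) == ror(x, -s mod W).
// The subw below computes -shift into rscratch1, and rorv/rorvw use
// only the low bits of the count, giving the reduction modulo W.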
11636 
11637 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11638 %{
11639   effect(DEF dst, USE src, USE shift);
11640 
11641   format %{ "rol    $dst, $src, $shift" %}
11642   ins_cost(INSN_COST * 3);
11643   ins_encode %{
11644     __ subw(rscratch1, zr, as_Register($shift$$reg));
11645     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11646             rscratch1);
11647     %}
11648   ins_pipe(ialu_reg_reg_vshift);
11649 %}
11650 
11651 // rol expander
11652 
11653 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11654 %{
11655   effect(DEF dst, USE src, USE shift);
11656 
11657   format %{ "rol    $dst, $src, $shift" %}
11658   ins_cost(INSN_COST * 3);
11659   ins_encode %{
11660     __ subw(rscratch1, zr, as_Register($shift$$reg));
11661     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11662             rscratch1);
11663     %}
11664   ins_pipe(ialu_reg_reg_vshift);
11665 %}
11666 
11667 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11668 %{
11669   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
11670 
11671   expand %{
11672     rolL_rReg(dst, src, shift, cr);
11673   %}
11674 %}
11675 
11676 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11677 %{
11678   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
11679 
11680   expand %{
11681     rolL_rReg(dst, src, shift, cr);
11682   %}
11683 %}
11684 
11685 instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11686 %{
11687   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
11688 
11689   expand %{
11690     rolI_rReg(dst, src, shift, cr);
11691   %}
11692 %}
11693 
11694 instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11695 %{
11696   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
11697 
11698   expand %{
11699     rolI_rReg(dst, src, shift, cr);
11700   %}
11701 %}
11702 
11703 // ror expander
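//
// Rotate-right maps directly onto rorv/rorvw; no count adjustment is
// needed because the variable rotate already reduces the count modulo
// the operand width.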
11704 
11705 instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11706 %{
11707   effect(DEF dst, USE src, USE shift);
11708 
11709   format %{ "ror    $dst, $src, $shift" %}
11710   ins_cost(INSN_COST);
11711   ins_encode %{
11712     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11713             as_Register($shift$$reg));
11714     %}
11715   ins_pipe(ialu_reg_reg_vshift);
11716 %}
11717 
11718 // ror expander
11719 
11720 instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11721 %{
11722   effect(DEF dst, USE src, USE shift);
11723 
11724   format %{ "ror    $dst, $src, $shift" %}
11725   ins_cost(INSN_COST);
11726   ins_encode %{
11727     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11728             as_Register($shift$$reg));
11729     %}
11730   ins_pipe(ialu_reg_reg_vshift);
11731 %}
11732 
11733 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11734 %{
11735   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
11736 
11737   expand %{
11738     rorL_rReg(dst, src, shift, cr);
11739   %}
11740 %}
11741 
11742 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11743 %{
11744   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
11745 
11746   expand %{
11747     rorL_rReg(dst, src, shift, cr);
11748   %}
11749 %}
11750 
11751 instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11752 %{
11753   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
11754 
11755   expand %{
11756     rorI_rReg(dst, src, shift, cr);
11757   %}
11758 %}
11759 
11760 instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11761 %{
11762   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
11763 
11764   expand %{
11765     rorI_rReg(dst, src, shift, cr);
11766   %}
11767 %}
11768 
11769 // Add/subtract (extended)
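//
// The extended-register forms of add/sub sign- or zero-extend the
// second operand before the arithmetic, so the ConvI2L and shift-pair
// idioms below fold into a single instruction.  Illustrative example:
// aLong + anInt matches (AddL src1 (ConvI2L src2)) and becomes
// "add dst, src1, src2, sxtw".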
11770 
11771 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11772 %{
11773   match(Set dst (AddL src1 (ConvI2L src2)));
11774   ins_cost(INSN_COST);
11775   format %{ "add  $dst, $src1, sxtw $src2" %}
11776 
11777    ins_encode %{
11778      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11779             as_Register($src2$$reg), ext::sxtw);
11780    %}
11781   ins_pipe(ialu_reg_reg);
11782 %}
11783 
11784 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11785 %{
11786   match(Set dst (SubL src1 (ConvI2L src2)));
11787   ins_cost(INSN_COST);
11788   format %{ "sub  $dst, $src1, sxtw $src2" %}
11789 
11790    ins_encode %{
11791      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
11792             as_Register($src2$$reg), ext::sxtw);
11793    %}
11794   ins_pipe(ialu_reg_reg);
11795 %}
11796 
11797 
11798 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
11799 %{
11800   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11801   ins_cost(INSN_COST);
11802   format %{ "add  $dst, $src1, sxth $src2" %}
11803 
11804    ins_encode %{
11805      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11806             as_Register($src2$$reg), ext::sxth);
11807    %}
11808   ins_pipe(ialu_reg_reg);
11809 %}
11810 
11811 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11812 %{
11813   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11814   ins_cost(INSN_COST);
11815   format %{ "add  $dst, $src1, sxtb $src2" %}
11816 
11817    ins_encode %{
11818      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11819             as_Register($src2$$reg), ext::sxtb);
11820    %}
11821   ins_pipe(ialu_reg_reg);
11822 %}
11823 
11824 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11825 %{
11826   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
11827   ins_cost(INSN_COST);
11828   format %{ "add  $dst, $src1, uxtb $src2" %}
11829 
11830    ins_encode %{
11831      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11832             as_Register($src2$$reg), ext::uxtb);
11833    %}
11834   ins_pipe(ialu_reg_reg);
11835 %}
11836 
11837 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
11838 %{
11839   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11840   ins_cost(INSN_COST);
11841   format %{ "add  $dst, $src1, sxth $src2" %}
11842 
11843    ins_encode %{
11844      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11845             as_Register($src2$$reg), ext::sxth);
11846    %}
11847   ins_pipe(ialu_reg_reg);
11848 %}
11849 
11850 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
11851 %{
11852   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11853   ins_cost(INSN_COST);
11854   format %{ "add  $dst, $src1, sxtw $src2" %}
11855 
11856    ins_encode %{
11857      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11858             as_Register($src2$$reg), ext::sxtw);
11859    %}
11860   ins_pipe(ialu_reg_reg);
11861 %}
11862 
11863 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11864 %{
11865   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11866   ins_cost(INSN_COST);
11867   format %{ "add  $dst, $src1, sxtb $src2" %}
11868 
11869    ins_encode %{
11870      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11871             as_Register($src2$$reg), ext::sxtb);
11872    %}
11873   ins_pipe(ialu_reg_reg);
11874 %}
11875 
11876 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11877 %{
11878   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
11879   ins_cost(INSN_COST);
11880   format %{ "add  $dst, $src1, uxtb $src2" %}
11881 
11882    ins_encode %{
11883      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11884             as_Register($src2$$reg), ext::uxtb);
11885    %}
11886   ins_pipe(ialu_reg_reg);
11887 %}
11888 
11889 
11890 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
11891 %{
11892   match(Set dst (AddI src1 (AndI src2 mask)));
11893   ins_cost(INSN_COST);
11894   format %{ "addw  $dst, $src1, $src2, uxtb" %}
11895 
11896    ins_encode %{
11897      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11898             as_Register($src2$$reg), ext::uxtb);
11899    %}
11900   ins_pipe(ialu_reg_reg);
11901 %}
11902 
11903 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
11904 %{
11905   match(Set dst (AddI src1 (AndI src2 mask)));
11906   ins_cost(INSN_COST);
11907   format %{ "addw  $dst, $src1, $src2, uxth" %}
11908 
11909    ins_encode %{
11910      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11911             as_Register($src2$$reg), ext::uxth);
11912    %}
11913   ins_pipe(ialu_reg_reg);
11914 %}
11915 
11916 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
11917 %{
11918   match(Set dst (AddL src1 (AndL src2 mask)));
11919   ins_cost(INSN_COST);
11920   format %{ "add  $dst, $src1, $src2, uxtb" %}
11921 
11922    ins_encode %{
11923      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11924             as_Register($src2$$reg), ext::uxtb);
11925    %}
11926   ins_pipe(ialu_reg_reg);
11927 %}
11928 
11929 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
11930 %{
11931   match(Set dst (AddL src1 (AndL src2 mask)));
11932   ins_cost(INSN_COST);
11933   format %{ "add  $dst, $src1, $src2, uxth" %}
11934 
11935    ins_encode %{
11936      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11937             as_Register($src2$$reg), ext::uxth);
11938    %}
11939   ins_pipe(ialu_reg_reg);
11940 %}
11941 
11942 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
11943 %{
11944   match(Set dst (AddL src1 (AndL src2 mask)));
11945   ins_cost(INSN_COST);
11946   format %{ "add  $dst, $src1, $src2, uxtw" %}
11947 
11948    ins_encode %{
11949      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11950             as_Register($src2$$reg), ext::uxtw);
11951    %}
11952   ins_pipe(ialu_reg_reg);
11953 %}
11954 
11955 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
11956 %{
11957   match(Set dst (SubI src1 (AndI src2 mask)));
11958   ins_cost(INSN_COST);
11959   format %{ "subw  $dst, $src1, $src2, uxtb" %}
11960 
11961    ins_encode %{
11962      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
11963             as_Register($src2$$reg), ext::uxtb);
11964    %}
11965   ins_pipe(ialu_reg_reg);
11966 %}
11967 
11968 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
11969 %{
11970   match(Set dst (SubI src1 (AndI src2 mask)));
11971   ins_cost(INSN_COST);
11972   format %{ "subw  $dst, $src1, $src2, uxth" %}
11973 
11974    ins_encode %{
11975      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
11976             as_Register($src2$$reg), ext::uxth);
11977    %}
11978   ins_pipe(ialu_reg_reg);
11979 %}
11980 
11981 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
11982 %{
11983   match(Set dst (SubL src1 (AndL src2 mask)));
11984   ins_cost(INSN_COST);
11985   format %{ "sub  $dst, $src1, $src2, uxtb" %}
11986 
11987    ins_encode %{
11988      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
11989             as_Register($src2$$reg), ext::uxtb);
11990    %}
11991   ins_pipe(ialu_reg_reg);
11992 %}
11993 
11994 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
11995 %{
11996   match(Set dst (SubL src1 (AndL src2 mask)));
11997   ins_cost(INSN_COST);
11998   format %{ "sub  $dst, $src1, $src2, uxth" %}
11999 
12000    ins_encode %{
12001      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12002             as_Register($src2$$reg), ext::uxth);
12003    %}
12004   ins_pipe(ialu_reg_reg);
12005 %}
12006 
12007 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12008 %{
12009   match(Set dst (SubL src1 (AndL src2 mask)));
12010   ins_cost(INSN_COST);
12011   format %{ "sub  $dst, $src1, $src2, uxtw" %}
12012 
12013    ins_encode %{
12014      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12015             as_Register($src2$$reg), ext::uxtw);
12016    %}
12017   ins_pipe(ialu_reg_reg);
12018 %}
12019 
12020 // END This section of the file is automatically generated. Do not edit --------------
12021 
12022 // ============================================================================
12023 // Floating Point Arithmetic Instructions
12024 
12025 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12026   match(Set dst (AddF src1 src2));
12027 
12028   ins_cost(INSN_COST * 5);
12029   format %{ "fadds   $dst, $src1, $src2" %}
12030 
12031   ins_encode %{
12032     __ fadds(as_FloatRegister($dst$$reg),
12033              as_FloatRegister($src1$$reg),
12034              as_FloatRegister($src2$$reg));
12035   %}
12036 
12037   ins_pipe(pipe_class_default);
12038 %}
12039 
12040 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12041   match(Set dst (AddD src1 src2));
12042 
12043   ins_cost(INSN_COST * 5);
12044   format %{ "faddd   $dst, $src1, $src2" %}
12045 
12046   ins_encode %{
12047     __ faddd(as_FloatRegister($dst$$reg),
12048              as_FloatRegister($src1$$reg),
12049              as_FloatRegister($src2$$reg));
12050   %}
12051 
12052   ins_pipe(pipe_class_default);
12053 %}
12054 
12055 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12056   match(Set dst (SubF src1 src2));
12057 
12058   ins_cost(INSN_COST * 5);
12059   format %{ "fsubs   $dst, $src1, $src2" %}
12060 
12061   ins_encode %{
12062     __ fsubs(as_FloatRegister($dst$$reg),
12063              as_FloatRegister($src1$$reg),
12064              as_FloatRegister($src2$$reg));
12065   %}
12066 
12067   ins_pipe(pipe_class_default);
12068 %}
12069 
12070 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12071   match(Set dst (SubD src1 src2));
12072 
12073   ins_cost(INSN_COST * 5);
12074   format %{ "fsubd   $dst, $src1, $src2" %}
12075 
12076   ins_encode %{
12077     __ fsubd(as_FloatRegister($dst$$reg),
12078              as_FloatRegister($src1$$reg),
12079              as_FloatRegister($src2$$reg));
12080   %}
12081 
12082   ins_pipe(pipe_class_default);
12083 %}
12084 
12085 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12086   match(Set dst (MulF src1 src2));
12087 
12088   ins_cost(INSN_COST * 6);
12089   format %{ "fmuls   $dst, $src1, $src2" %}
12090 
12091   ins_encode %{
12092     __ fmuls(as_FloatRegister($dst$$reg),
12093              as_FloatRegister($src1$$reg),
12094              as_FloatRegister($src2$$reg));
12095   %}
12096 
12097   ins_pipe(pipe_class_default);
12098 %}
12099 
12100 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12101   match(Set dst (MulD src1 src2));
12102 
12103   ins_cost(INSN_COST * 6);
12104   format %{ "fmuld   $dst, $src1, $src2" %}
12105 
12106   ins_encode %{
12107     __ fmuld(as_FloatRegister($dst$$reg),
12108              as_FloatRegister($src1$$reg),
12109              as_FloatRegister($src2$$reg));
12110   %}
12111 
12112   ins_pipe(pipe_class_default);
12113 %}
12114 
// We cannot use these fused mul with add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
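//
// n.b. fmadd computes round(a * b + c) with a single rounding, whereas
// Java requires two: round(round(a * b) + c). The results can differ in
// the last bit, which is why these rules stay disabled.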
12120 
12121 
12122 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12123 //   match(Set dst (AddF (MulF src1 src2) src3));
12124 
12125 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12126 
12127 //   ins_encode %{
12128 //     __ fmadds(as_FloatRegister($dst$$reg),
12129 //              as_FloatRegister($src1$$reg),
12130 //              as_FloatRegister($src2$$reg),
12131 //              as_FloatRegister($src3$$reg));
12132 //   %}
12133 
12134 //   ins_pipe(pipe_class_default);
12135 // %}
12136 
12137 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12138 //   match(Set dst (AddD (MulD src1 src2) src3));
12139 
12140 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12141 
12142 //   ins_encode %{
12143 //     __ fmaddd(as_FloatRegister($dst$$reg),
12144 //              as_FloatRegister($src1$$reg),
12145 //              as_FloatRegister($src2$$reg),
12146 //              as_FloatRegister($src3$$reg));
12147 //   %}
12148 
12149 //   ins_pipe(pipe_class_default);
12150 // %}
12151 
12152 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12153 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12154 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12155 
12156 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12157 
12158 //   ins_encode %{
12159 //     __ fmsubs(as_FloatRegister($dst$$reg),
12160 //               as_FloatRegister($src1$$reg),
12161 //               as_FloatRegister($src2$$reg),
//               as_FloatRegister($src3$$reg));
12163 //   %}
12164 
12165 //   ins_pipe(pipe_class_default);
12166 // %}
12167 
12168 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12169 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12170 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12171 
12172 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12173 
12174 //   ins_encode %{
12175 //     __ fmsubd(as_FloatRegister($dst$$reg),
12176 //               as_FloatRegister($src1$$reg),
12177 //               as_FloatRegister($src2$$reg),
12178 //               as_FloatRegister($src3$$reg));
12179 //   %}
12180 
12181 //   ins_pipe(pipe_class_default);
12182 // %}
12183 
12184 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12185 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12186 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12187 
12188 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12189 
12190 //   ins_encode %{
12191 //     __ fnmadds(as_FloatRegister($dst$$reg),
12192 //                as_FloatRegister($src1$$reg),
12193 //                as_FloatRegister($src2$$reg),
12194 //                as_FloatRegister($src3$$reg));
12195 //   %}
12196 
12197 //   ins_pipe(pipe_class_default);
12198 // %}
12199 
12200 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12201 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12202 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12203 
12204 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12205 
12206 //   ins_encode %{
12207 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12208 //                as_FloatRegister($src1$$reg),
12209 //                as_FloatRegister($src2$$reg),
12210 //                as_FloatRegister($src3$$reg));
12211 //   %}
12212 
12213 //   ins_pipe(pipe_class_default);
12214 // %}
12215 
12216 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12217 //   match(Set dst (SubF (MulF src1 src2) src3));
12218 
12219 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12220 
12221 //   ins_encode %{
12222 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12223 //                as_FloatRegister($src1$$reg),
12224 //                as_FloatRegister($src2$$reg),
12225 //                as_FloatRegister($src3$$reg));
12226 //   %}
12227 
12228 //   ins_pipe(pipe_class_default);
12229 // %}
12230 
12231 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12232 //   match(Set dst (SubD (MulD src1 src2) src3));
12233 
12234 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12235 
12236 //   ins_encode %{
12237 //   // n.b. insn name should be fnmsubd
12238 //     __ fnmsub(as_FloatRegister($dst$$reg),
12239 //                as_FloatRegister($src1$$reg),
12240 //                as_FloatRegister($src2$$reg),
12241 //                as_FloatRegister($src3$$reg));
12242 //   %}
12243 
12244 //   ins_pipe(pipe_class_default);
12245 // %}
12246 
12247 
12248 instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12249   match(Set dst (DivF src1  src2));
12250 
12251   ins_cost(INSN_COST * 18);
12252   format %{ "fdivs   $dst, $src1, $src2" %}
12253 
12254   ins_encode %{
12255     __ fdivs(as_FloatRegister($dst$$reg),
12256              as_FloatRegister($src1$$reg),
12257              as_FloatRegister($src2$$reg));
12258   %}
12259 
12260   ins_pipe(pipe_class_default);
12261 %}
12262 
12263 instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12264   match(Set dst (DivD src1  src2));
12265 
12266   ins_cost(INSN_COST * 32);
12267   format %{ "fdivd   $dst, $src1, $src2" %}
12268 
12269   ins_encode %{
12270     __ fdivd(as_FloatRegister($dst$$reg),
12271              as_FloatRegister($src1$$reg),
12272              as_FloatRegister($src2$$reg));
12273   %}
12274 
12275   ins_pipe(pipe_class_default);
12276 %}
12277 
12278 instruct negF_reg_reg(vRegF dst, vRegF src) %{
12279   match(Set dst (NegF src));
12280 
12281   ins_cost(INSN_COST * 3);
12282   format %{ "fneg   $dst, $src" %}
12283 
12284   ins_encode %{
12285     __ fnegs(as_FloatRegister($dst$$reg),
12286              as_FloatRegister($src$$reg));
12287   %}
12288 
12289   ins_pipe(pipe_class_default);
12290 %}
12291 
12292 instruct negD_reg_reg(vRegD dst, vRegD src) %{
12293   match(Set dst (NegD src));
12294 
12295   ins_cost(INSN_COST * 3);
12296   format %{ "fnegd   $dst, $src" %}
12297 
12298   ins_encode %{
12299     __ fnegd(as_FloatRegister($dst$$reg),
12300              as_FloatRegister($src$$reg));
12301   %}
12302 
12303   ins_pipe(pipe_class_default);
12304 %}
12305 
12306 instruct absF_reg(vRegF dst, vRegF src) %{
12307   match(Set dst (AbsF src));
12308 
12309   ins_cost(INSN_COST * 3);
12310   format %{ "fabss   $dst, $src" %}
12311   ins_encode %{
12312     __ fabss(as_FloatRegister($dst$$reg),
12313              as_FloatRegister($src$$reg));
12314   %}
12315 
12316   ins_pipe(pipe_class_default);
12317 %}
12318 
12319 instruct absD_reg(vRegD dst, vRegD src) %{
12320   match(Set dst (AbsD src));
12321 
12322   ins_cost(INSN_COST * 3);
12323   format %{ "fabsd   $dst, $src" %}
12324   ins_encode %{
12325     __ fabsd(as_FloatRegister($dst$$reg),
12326              as_FloatRegister($src$$reg));
12327   %}
12328 
12329   ins_pipe(pipe_class_default);
12330 %}
12331 
12332 instruct sqrtD_reg(vRegD dst, vRegD src) %{
12333   match(Set dst (SqrtD src));
12334 
12335   ins_cost(INSN_COST * 50);
12336   format %{ "fsqrtd  $dst, $src" %}
12337   ins_encode %{
12338     __ fsqrtd(as_FloatRegister($dst$$reg),
12339              as_FloatRegister($src$$reg));
12340   %}
12341 
12342   ins_pipe(pipe_class_default);
12343 %}
12344 
12345 instruct sqrtF_reg(vRegF dst, vRegF src) %{
12346   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
12347 
12348   ins_cost(INSN_COST * 50);
12349   format %{ "fsqrts  $dst, $src" %}
12350   ins_encode %{
12351     __ fsqrts(as_FloatRegister($dst$$reg),
12352              as_FloatRegister($src$$reg));
12353   %}
12354 
12355   ins_pipe(pipe_class_default);
12356 %}
12357 
12358 // ============================================================================
12359 // Logical Instructions
12360 
12361 // Integer Logical Instructions
12362 
12363 // And Instructions
12364 
12365 
12366 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
12367   match(Set dst (AndI src1 src2));
12368 
12369   format %{ "andw  $dst, $src1, $src2\t# int" %}
12370 
12371   ins_cost(INSN_COST);
12372   ins_encode %{
12373     __ andw(as_Register($dst$$reg),
12374             as_Register($src1$$reg),
12375             as_Register($src2$$reg));
12376   %}
12377 
12378   ins_pipe(ialu_reg_reg);
12379 %}
12380 
12381 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
12382   match(Set dst (AndI src1 src2));
12383 
12384   format %{ "andsw  $dst, $src1, $src2\t# int" %}
12385 
12386   ins_cost(INSN_COST);
12387   ins_encode %{
12388     __ andw(as_Register($dst$$reg),
12389             as_Register($src1$$reg),
12390             (unsigned long)($src2$$constant));
12391   %}
12392 
12393   ins_pipe(ialu_reg_imm);
12394 %}
12395 
12396 // Or Instructions
12397 
12398 instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12399   match(Set dst (OrI src1 src2));
12400 
12401   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12402 
12403   ins_cost(INSN_COST);
12404   ins_encode %{
12405     __ orrw(as_Register($dst$$reg),
12406             as_Register($src1$$reg),
12407             as_Register($src2$$reg));
12408   %}
12409 
12410   ins_pipe(ialu_reg_reg);
12411 %}
12412 
12413 instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12414   match(Set dst (OrI src1 src2));
12415 
12416   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12417 
12418   ins_cost(INSN_COST);
12419   ins_encode %{
12420     __ orrw(as_Register($dst$$reg),
12421             as_Register($src1$$reg),
12422             (unsigned long)($src2$$constant));
12423   %}
12424 
12425   ins_pipe(ialu_reg_imm);
12426 %}
12427 
12428 // Xor Instructions
12429 
12430 instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12431   match(Set dst (XorI src1 src2));
12432 
12433   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12434 
12435   ins_cost(INSN_COST);
12436   ins_encode %{
12437     __ eorw(as_Register($dst$$reg),
12438             as_Register($src1$$reg),
12439             as_Register($src2$$reg));
12440   %}
12441 
12442   ins_pipe(ialu_reg_reg);
12443 %}
12444 
12445 instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12446   match(Set dst (XorI src1 src2));
12447 
12448   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12449 
12450   ins_cost(INSN_COST);
12451   ins_encode %{
12452     __ eorw(as_Register($dst$$reg),
12453             as_Register($src1$$reg),
12454             (unsigned long)($src2$$constant));
12455   %}
12456 
12457   ins_pipe(ialu_reg_imm);
12458 %}
12459 
12460 // Long Logical Instructions
12461 // TODO
12462 
12463 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
12464   match(Set dst (AndL src1 src2));
12465 
12466   format %{ "and  $dst, $src1, $src2\t# int" %}
12467 
12468   ins_cost(INSN_COST);
12469   ins_encode %{
12470     __ andr(as_Register($dst$$reg),
12471             as_Register($src1$$reg),
12472             as_Register($src2$$reg));
12473   %}
12474 
12475   ins_pipe(ialu_reg_reg);
12476 %}
12477 
12478 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
12479   match(Set dst (AndL src1 src2));
12480 
12481   format %{ "and  $dst, $src1, $src2\t# int" %}
12482 
12483   ins_cost(INSN_COST);
12484   ins_encode %{
12485     __ andr(as_Register($dst$$reg),
12486             as_Register($src1$$reg),
12487             (unsigned long)($src2$$constant));
12488   %}
12489 
12490   ins_pipe(ialu_reg_imm);
12491 %}
12492 
12493 // Or Instructions
12494 
12495 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12496   match(Set dst (OrL src1 src2));
12497 
12498   format %{ "orr  $dst, $src1, $src2\t# int" %}
12499 
12500   ins_cost(INSN_COST);
12501   ins_encode %{
12502     __ orr(as_Register($dst$$reg),
12503            as_Register($src1$$reg),
12504            as_Register($src2$$reg));
12505   %}
12506 
12507   ins_pipe(ialu_reg_reg);
12508 %}
12509 
12510 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12511   match(Set dst (OrL src1 src2));
12512 
12513   format %{ "orr  $dst, $src1, $src2\t# int" %}
12514 
12515   ins_cost(INSN_COST);
12516   ins_encode %{
12517     __ orr(as_Register($dst$$reg),
12518            as_Register($src1$$reg),
12519            (unsigned long)($src2$$constant));
12520   %}
12521 
12522   ins_pipe(ialu_reg_imm);
12523 %}
12524 
12525 // Xor Instructions
12526 
12527 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12528   match(Set dst (XorL src1 src2));
12529 
12530   format %{ "eor  $dst, $src1, $src2\t# int" %}
12531 
12532   ins_cost(INSN_COST);
12533   ins_encode %{
12534     __ eor(as_Register($dst$$reg),
12535            as_Register($src1$$reg),
12536            as_Register($src2$$reg));
12537   %}
12538 
12539   ins_pipe(ialu_reg_reg);
12540 %}
12541 
12542 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12543   match(Set dst (XorL src1 src2));
12544 
12545   ins_cost(INSN_COST);
12546   format %{ "eor  $dst, $src1, $src2\t# int" %}
12547 
12548   ins_encode %{
12549     __ eor(as_Register($dst$$reg),
12550            as_Register($src1$$reg),
12551            (unsigned long)($src2$$constant));
12552   %}
12553 
12554   ins_pipe(ialu_reg_imm);
12555 %}
12556 
12557 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
12558 %{
12559   match(Set dst (ConvI2L src));
12560 
12561   ins_cost(INSN_COST);
12562   format %{ "sxtw  $dst, $src\t# i2l" %}
12563   ins_encode %{
12564     __ sbfm($dst$$Register, $src$$Register, 0, 31);
12565   %}
12566   ins_pipe(ialu_reg_shift);
12567 %}
12568 
// this pattern occurs in big-number arithmetic such as java.math.BigInteger
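// For example, the zero-extension idiom
//   long l = i & 0xFFFFFFFFL;
// reaches the matcher as (AndL (ConvI2L i) 0xFFFFFFFF) and collapses to
// a single ubfm (uxtw) instead of a sign extend followed by a mask.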
12570 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
12571 %{
12572   match(Set dst (AndL (ConvI2L src) mask));
12573 
12574   ins_cost(INSN_COST);
12575   format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
12576   ins_encode %{
12577     __ ubfm($dst$$Register, $src$$Register, 0, 31);
12578   %}
12579 
12580   ins_pipe(ialu_reg_shift);
12581 %}
12582 
12583 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
12584   match(Set dst (ConvL2I src));
12585 
12586   ins_cost(INSN_COST);
12587   format %{ "movw  $dst, $src \t// l2i" %}
12588 
12589   ins_encode %{
12590     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
12591   %}
12592 
12593   ins_pipe(ialu_reg);
12594 %}
12595 
12596 instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
12597 %{
12598   match(Set dst (Conv2B src));
12599   effect(KILL cr);
12600 
12601   format %{
12602     "cmpw $src, zr\n\t"
12603     "cset $dst, ne"
12604   %}
12605 
12606   ins_encode %{
12607     __ cmpw(as_Register($src$$reg), zr);
12608     __ cset(as_Register($dst$$reg), Assembler::NE);
12609   %}
12610 
12611   ins_pipe(ialu_reg);
12612 %}
12613 
12614 instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
12615 %{
12616   match(Set dst (Conv2B src));
12617   effect(KILL cr);
12618 
12619   format %{
12620     "cmp  $src, zr\n\t"
12621     "cset $dst, ne"
12622   %}
12623 
12624   ins_encode %{
12625     __ cmp(as_Register($src$$reg), zr);
12626     __ cset(as_Register($dst$$reg), Assembler::NE);
12627   %}
12628 
12629   ins_pipe(ialu_reg);
12630 %}
12631 
12632 instruct convD2F_reg(vRegF dst, vRegD src) %{
12633   match(Set dst (ConvD2F src));
12634 
12635   ins_cost(INSN_COST * 5);
12636   format %{ "fcvtd  $dst, $src \t// d2f" %}
12637 
12638   ins_encode %{
12639     __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
12640   %}
12641 
12642   ins_pipe(pipe_class_default);
12643 %}
12644 
12645 instruct convF2D_reg(vRegD dst, vRegF src) %{
12646   match(Set dst (ConvF2D src));
12647 
12648   ins_cost(INSN_COST * 5);
12649   format %{ "fcvts  $dst, $src \t// f2d" %}
12650 
12651   ins_encode %{
12652     __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
12653   %}
12654 
12655   ins_pipe(pipe_class_default);
12656 %}
12657 
12658 instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
12659   match(Set dst (ConvF2I src));
12660 
12661   ins_cost(INSN_COST * 5);
12662   format %{ "fcvtzsw  $dst, $src \t// f2i" %}
12663 
12664   ins_encode %{
12665     __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12666   %}
12667 
12668   ins_pipe(pipe_class_default);
12669 %}
12670 
12671 instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
12672   match(Set dst (ConvF2L src));
12673 
12674   ins_cost(INSN_COST * 5);
12675   format %{ "fcvtzs  $dst, $src \t// f2l" %}
12676 
12677   ins_encode %{
12678     __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12679   %}
12680 
12681   ins_pipe(pipe_class_default);
12682 %}
12683 
12684 instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
12685   match(Set dst (ConvI2F src));
12686 
12687   ins_cost(INSN_COST * 5);
12688   format %{ "scvtfws  $dst, $src \t// i2f" %}
12689 
12690   ins_encode %{
12691     __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12692   %}
12693 
12694   ins_pipe(pipe_class_default);
12695 %}
12696 
12697 instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
12698   match(Set dst (ConvL2F src));
12699 
12700   ins_cost(INSN_COST * 5);
12701   format %{ "scvtfs  $dst, $src \t// l2f" %}
12702 
12703   ins_encode %{
12704     __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12705   %}
12706 
12707   ins_pipe(pipe_class_default);
12708 %}
12709 
12710 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
12711   match(Set dst (ConvD2I src));
12712 
12713   ins_cost(INSN_COST * 5);
12714   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
12715 
12716   ins_encode %{
12717     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12718   %}
12719 
12720   ins_pipe(pipe_class_default);
12721 %}
12722 
12723 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
12724   match(Set dst (ConvD2L src));
12725 
12726   ins_cost(INSN_COST * 5);
12727   format %{ "fcvtzd  $dst, $src \t// d2l" %}
12728 
12729   ins_encode %{
12730     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12731   %}
12732 
12733   ins_pipe(pipe_class_default);
12734 %}
12735 
12736 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
12737   match(Set dst (ConvI2D src));
12738 
12739   ins_cost(INSN_COST * 5);
12740   format %{ "scvtfwd  $dst, $src \t// i2d" %}
12741 
12742   ins_encode %{
12743     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12744   %}
12745 
12746   ins_pipe(pipe_class_default);
12747 %}
12748 
12749 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
12750   match(Set dst (ConvL2D src));
12751 
12752   ins_cost(INSN_COST * 5);
12753   format %{ "scvtfd  $dst, $src \t// l2d" %}
12754 
12755   ins_encode %{
12756     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12757   %}
12758 
12759   ins_pipe(pipe_class_default);
12760 %}
12761 
12762 // stack <-> reg and reg <-> reg shuffles with no conversion
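//
// These are the raw bit-pattern moves behind Float.floatToRawIntBits /
// intBitsToFloat and their Double equivalents: the value is
// reinterpreted, not converted, either through a stack slot or with a
// direct fmov between the register files.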
12763 
12764 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
12765 
12766   match(Set dst (MoveF2I src));
12767 
12768   effect(DEF dst, USE src);
12769 
12770   ins_cost(4 * INSN_COST);
12771 
12772   format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}
12773 
12774   ins_encode %{
12775     __ ldrw($dst$$Register, Address(sp, $src$$disp));
12776   %}
12777 
12778   ins_pipe(iload_reg_reg);
12779 
12780 %}
12781 
12782 instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{
12783 
12784   match(Set dst (MoveI2F src));
12785 
12786   effect(DEF dst, USE src);
12787 
12788   ins_cost(4 * INSN_COST);
12789 
12790   format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}
12791 
12792   ins_encode %{
12793     __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
12794   %}
12795 
12796   ins_pipe(pipe_class_memory);
12797 
12798 %}
12799 
12800 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
12801 
12802   match(Set dst (MoveD2L src));
12803 
12804   effect(DEF dst, USE src);
12805 
12806   ins_cost(4 * INSN_COST);
12807 
12808   format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}
12809 
12810   ins_encode %{
12811     __ ldr($dst$$Register, Address(sp, $src$$disp));
12812   %}
12813 
12814   ins_pipe(iload_reg_reg);
12815 
12816 %}
12817 
12818 instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{
12819 
12820   match(Set dst (MoveL2D src));
12821 
12822   effect(DEF dst, USE src);
12823 
12824   ins_cost(4 * INSN_COST);
12825 
12826   format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}
12827 
12828   ins_encode %{
12829     __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
12830   %}
12831 
12832   ins_pipe(pipe_class_memory);
12833 
12834 %}
12835 
12836 instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
12837 
12838   match(Set dst (MoveF2I src));
12839 
12840   effect(DEF dst, USE src);
12841 
12842   ins_cost(INSN_COST);
12843 
12844   format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
12845 
12846   ins_encode %{
12847     __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
12848   %}
12849 
12850   ins_pipe(pipe_class_memory);
12851 
12852 %}
12853 
12854 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
12855 
12856   match(Set dst (MoveI2F src));
12857 
12858   effect(DEF dst, USE src);
12859 
12860   ins_cost(INSN_COST);
12861 
12862   format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
12863 
12864   ins_encode %{
12865     __ strw($src$$Register, Address(sp, $dst$$disp));
12866   %}
12867 
12868   ins_pipe(istore_reg_reg);
12869 
12870 %}
12871 
12872 instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
12873 
12874   match(Set dst (MoveD2L src));
12875 
12876   effect(DEF dst, USE src);
12877 
12878   ins_cost(INSN_COST);
12879 
12880   format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %}
12881 
12882   ins_encode %{
12883     __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
12884   %}
12885 
12886   ins_pipe(pipe_class_memory);
12887 
12888 %}
12889 
12890 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
12891 
12892   match(Set dst (MoveL2D src));
12893 
12894   effect(DEF dst, USE src);
12895 
12896   ins_cost(INSN_COST);
12897 
12898   format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}
12899 
12900   ins_encode %{
12901     __ str($src$$Register, Address(sp, $dst$$disp));
12902   %}
12903 
12904   ins_pipe(istore_reg_reg);
12905 
12906 %}
12907 
12908 instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{
12909 
12910   match(Set dst (MoveF2I src));
12911 
12912   effect(DEF dst, USE src);
12913 
12914   ins_cost(INSN_COST);
12915 
12916   format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}
12917 
12918   ins_encode %{
12919     __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
12920   %}
12921 
12922   ins_pipe(pipe_class_memory);
12923 
12924 %}
12925 
12926 instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{
12927 
12928   match(Set dst (MoveI2F src));
12929 
12930   effect(DEF dst, USE src);
12931 
12932   ins_cost(INSN_COST);
12933 
12934   format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}
12935 
12936   ins_encode %{
12937     __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
12938   %}
12939 
12940   ins_pipe(pipe_class_memory);
12941 
12942 %}
12943 
12944 instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
12945 
12946   match(Set dst (MoveD2L src));
12947 
12948   effect(DEF dst, USE src);
12949 
12950   ins_cost(INSN_COST);
12951 
12952   format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}
12953 
12954   ins_encode %{
12955     __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
12956   %}
12957 
12958   ins_pipe(pipe_class_memory);
12959 
12960 %}
12961 
12962 instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{
12963 
12964   match(Set dst (MoveL2D src));
12965 
12966   effect(DEF dst, USE src);
12967 
12968   ins_cost(INSN_COST);
12969 
12970   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
12971 
12972   ins_encode %{
12973     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
12974   %}
12975 
12976   ins_pipe(pipe_class_memory);
12977 
12978 %}
12979 
12980 // ============================================================================
12981 // clearing of an array
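//
// n.b. cnt and base are pinned to r11/r10 and marked USE_KILL so that
// the out-of-line encoding (aarch64_enc_clear_array_reg_reg) is free to
// clobber both registers while it zeroes the array.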
12982 
12983 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
12984 %{
12985   match(Set dummy (ClearArray cnt base));
12986   effect(USE_KILL cnt, USE_KILL base);
12987 
12988   ins_cost(4 * INSN_COST);
12989   format %{ "ClearArray $cnt, $base" %}
12990 
12991   ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));
12992 
12993   ins_pipe(pipe_class_memory);
12994 %}
12995 
12996 // ============================================================================
12997 // Overflow Math Instructions
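//
// Add and subtract overflow checks map directly onto cmn/cmp, which
// set the V flag on signed overflow. Multiply has no flag-setting form,
// so the rules below widen the product and compare it against its own
// sign extension: a mismatch means the result did not fit, and a final
// manufactured compare raises V for the generic cmpOp test.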
12998 
12999 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13000 %{
13001   match(Set cr (OverflowAddI op1 op2));
13002 
13003   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13004   ins_cost(INSN_COST);
13005   ins_encode %{
13006     __ cmnw($op1$$Register, $op2$$Register);
13007   %}
13008 
13009   ins_pipe(icmp_reg_reg);
13010 %}
13011 
13012 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13013 %{
13014   match(Set cr (OverflowAddI op1 op2));
13015 
13016   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13017   ins_cost(INSN_COST);
13018   ins_encode %{
13019     __ cmnw($op1$$Register, $op2$$constant);
13020   %}
13021 
13022   ins_pipe(icmp_reg_imm);
13023 %}
13024 
13025 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13026 %{
13027   match(Set cr (OverflowAddL op1 op2));
13028 
13029   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13030   ins_cost(INSN_COST);
13031   ins_encode %{
13032     __ cmn($op1$$Register, $op2$$Register);
13033   %}
13034 
13035   ins_pipe(icmp_reg_reg);
13036 %}
13037 
13038 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13039 %{
13040   match(Set cr (OverflowAddL op1 op2));
13041 
13042   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13043   ins_cost(INSN_COST);
13044   ins_encode %{
13045     __ cmn($op1$$Register, $op2$$constant);
13046   %}
13047 
13048   ins_pipe(icmp_reg_imm);
13049 %}
13050 
13051 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13052 %{
13053   match(Set cr (OverflowSubI op1 op2));
13054 
13055   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13056   ins_cost(INSN_COST);
13057   ins_encode %{
13058     __ cmpw($op1$$Register, $op2$$Register);
13059   %}
13060 
13061   ins_pipe(icmp_reg_reg);
13062 %}
13063 
13064 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13065 %{
13066   match(Set cr (OverflowSubI op1 op2));
13067 
13068   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13069   ins_cost(INSN_COST);
13070   ins_encode %{
13071     __ cmpw($op1$$Register, $op2$$constant);
13072   %}
13073 
13074   ins_pipe(icmp_reg_imm);
13075 %}
13076 
13077 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13078 %{
13079   match(Set cr (OverflowSubL op1 op2));
13080 
13081   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13082   ins_cost(INSN_COST);
13083   ins_encode %{
13084     __ cmp($op1$$Register, $op2$$Register);
13085   %}
13086 
13087   ins_pipe(icmp_reg_reg);
13088 %}
13089 
13090 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13091 %{
13092   match(Set cr (OverflowSubL op1 op2));
13093 
13094   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13095   ins_cost(INSN_COST);
13096   ins_encode %{
13097     __ cmp($op1$$Register, $op2$$constant);
13098   %}
13099 
13100   ins_pipe(icmp_reg_imm);
13101 %}
13102 
13103 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13104 %{
13105   match(Set cr (OverflowSubI zero op1));
13106 
13107   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13108   ins_cost(INSN_COST);
13109   ins_encode %{
13110     __ cmpw(zr, $op1$$Register);
13111   %}
13112 
13113   ins_pipe(icmp_reg_imm);
13114 %}
13115 
13116 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13117 %{
13118   match(Set cr (OverflowSubL zero op1));
13119 
13120   format %{ "cmp   zr, $op1\t# overflow check long" %}
13121   ins_cost(INSN_COST);
13122   ins_encode %{
13123     __ cmp(zr, $op1$$Register);
13124   %}
13125 
13126   ins_pipe(icmp_reg_imm);
13127 %}
13128 
13129 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13130 %{
13131   match(Set cr (OverflowMulI op1 op2));
13132 
13133   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13134             "cmp   rscratch1, rscratch1, sxtw\n\t"
13135             "movw  rscratch1, #0x80000000\n\t"
13136             "cselw rscratch1, rscratch1, zr, NE\n\t"
13137             "cmpw  rscratch1, #1" %}
13138   ins_cost(5 * INSN_COST);
13139   ins_encode %{
13140     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13141     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13142     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13143     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13144     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13145   %}
13146 
13147   ins_pipe(pipe_slow);
13148 %}
13149 
13150 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13151 %{
13152   match(If cmp (OverflowMulI op1 op2));
13153   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13154             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13155   effect(USE labl, KILL cr);
13156 
13157   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13158             "cmp   rscratch1, rscratch1, sxtw\n\t"
13159             "b$cmp   $labl" %}
13160   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13161   ins_encode %{
13162     Label* L = $labl$$label;
13163     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13164     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13165     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13166     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13167   %}
13168 
13169   ins_pipe(pipe_serial);
13170 %}
13171 
13172 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13173 %{
13174   match(Set cr (OverflowMulL op1 op2));
13175 
13176   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13177             "smulh rscratch2, $op1, $op2\n\t"
13178             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13179             "movw  rscratch1, #0x80000000\n\t"
13180             "cselw rscratch1, rscratch1, zr, NE\n\t"
13181             "cmpw  rscratch1, #1" %}
13182   ins_cost(6 * INSN_COST);
13183   ins_encode %{
13184     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13185     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13186     __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
13187     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13188     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13189     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13190   %}
13191 
13192   ins_pipe(pipe_slow);
13193 %}
13194 
13195 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13196 %{
13197   match(If cmp (OverflowMulL op1 op2));
13198   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13199             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13200   effect(USE labl, KILL cr);
13201 
13202   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13203             "smulh rscratch2, $op1, $op2\n\t"
13204             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13205             "b$cmp $labl" %}
13206   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
13207   ins_encode %{
13208     Label* L = $labl$$label;
13209     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13210     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13211     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13212     __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
13213     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13214   %}
13215 
13216   ins_pipe(pipe_serial);
13217 %}
13218 
13219 // ============================================================================
13220 // Compare Instructions
13221 
13222 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
13223 %{
13224   match(Set cr (CmpI op1 op2));
13225 
13226   effect(DEF cr, USE op1, USE op2);
13227 
13228   ins_cost(INSN_COST);
13229   format %{ "cmpw  $op1, $op2" %}
13230 
13231   ins_encode(aarch64_enc_cmpw(op1, op2));
13232 
13233   ins_pipe(icmp_reg_reg);
13234 %}
13235 
13236 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
13237 %{
13238   match(Set cr (CmpI op1 zero));
13239 
13240   effect(DEF cr, USE op1);
13241 
13242   ins_cost(INSN_COST);
13243   format %{ "cmpw $op1, 0" %}
13244 
13245   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13246 
13247   ins_pipe(icmp_reg_imm);
13248 %}
13249 
13250 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
13251 %{
13252   match(Set cr (CmpI op1 op2));
13253 
13254   effect(DEF cr, USE op1);
13255 
13256   ins_cost(INSN_COST);
13257   format %{ "cmpw  $op1, $op2" %}
13258 
13259   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13260 
13261   ins_pipe(icmp_reg_imm);
13262 %}
13263 
13264 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
13265 %{
13266   match(Set cr (CmpI op1 op2));
13267 
13268   effect(DEF cr, USE op1);
13269 
13270   ins_cost(INSN_COST * 2);
13271   format %{ "cmpw  $op1, $op2" %}
13272 
13273   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13274 
13275   ins_pipe(icmp_reg_imm);
13276 %}
13277 
// Unsigned compare instructions; really the same as signed compare,
// except they should only be used to feed an If or a CMovI which takes
// a cmpOpU.
13281 
13282 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
13283 %{
13284   match(Set cr (CmpU op1 op2));
13285 
13286   effect(DEF cr, USE op1, USE op2);
13287 
13288   ins_cost(INSN_COST);
13289   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13290 
13291   ins_encode(aarch64_enc_cmpw(op1, op2));
13292 
13293   ins_pipe(icmp_reg_reg);
13294 %}
13295 
13296 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
13297 %{
13298   match(Set cr (CmpU op1 zero));
13299 
13300   effect(DEF cr, USE op1);
13301 
13302   ins_cost(INSN_COST);
13303   format %{ "cmpw $op1, #0\t# unsigned" %}
13304 
13305   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13306 
13307   ins_pipe(icmp_reg_imm);
13308 %}
13309 
13310 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
13311 %{
13312   match(Set cr (CmpU op1 op2));
13313 
13314   effect(DEF cr, USE op1);
13315 
13316   ins_cost(INSN_COST);
13317   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13318 
13319   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13320 
13321   ins_pipe(icmp_reg_imm);
13322 %}
13323 
13324 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
13325 %{
13326   match(Set cr (CmpU op1 op2));
13327 
13328   effect(DEF cr, USE op1);
13329 
13330   ins_cost(INSN_COST * 2);
13331   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13332 
13333   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13334 
13335   ins_pipe(icmp_reg_imm);
13336 %}
13337 
13338 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13339 %{
13340   match(Set cr (CmpL op1 op2));
13341 
13342   effect(DEF cr, USE op1, USE op2);
13343 
13344   ins_cost(INSN_COST);
13345   format %{ "cmp  $op1, $op2" %}
13346 
13347   ins_encode(aarch64_enc_cmp(op1, op2));
13348 
13349   ins_pipe(icmp_reg_reg);
13350 %}
13351 
13352 instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
13353 %{
13354   match(Set cr (CmpL op1 zero));
13355 
13356   effect(DEF cr, USE op1);
13357 
13358   ins_cost(INSN_COST);
13359   format %{ "tst  $op1" %}
13360 
13361   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
13362 
13363   ins_pipe(icmp_reg_imm);
13364 %}
13365 
13366 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
13367 %{
13368   match(Set cr (CmpL op1 op2));
13369 
13370   effect(DEF cr, USE op1);
13371 
13372   ins_cost(INSN_COST);
13373   format %{ "cmp  $op1, $op2" %}
13374 
13375   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
13376 
13377   ins_pipe(icmp_reg_imm);
13378 %}
13379 
13380 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
13381 %{
13382   match(Set cr (CmpL op1 op2));
13383 
13384   effect(DEF cr, USE op1);
13385 
13386   ins_cost(INSN_COST * 2);
13387   format %{ "cmp  $op1, $op2" %}
13388 
13389   ins_encode(aarch64_enc_cmp_imm(op1, op2));
13390 
13391   ins_pipe(icmp_reg_imm);
13392 %}
13393 
13394 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
13395 %{
13396   match(Set cr (CmpP op1 op2));
13397 
13398   effect(DEF cr, USE op1, USE op2);
13399 
13400   ins_cost(INSN_COST);
13401   format %{ "cmp  $op1, $op2\t // ptr" %}
13402 
13403   ins_encode(aarch64_enc_cmpp(op1, op2));
13404 
13405   ins_pipe(icmp_reg_reg);
13406 %}
13407 
13408 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
13409 %{
13410   match(Set cr (CmpN op1 op2));
13411 
13412   effect(DEF cr, USE op1, USE op2);
13413 
13414   ins_cost(INSN_COST);
13415   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
13416 
13417   ins_encode(aarch64_enc_cmpn(op1, op2));
13418 
13419   ins_pipe(icmp_reg_reg);
13420 %}
13421 
13422 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
13423 %{
13424   match(Set cr (CmpP op1 zero));
13425 
13426   effect(DEF cr, USE op1, USE zero);
13427 
13428   ins_cost(INSN_COST);
13429   format %{ "cmp  $op1, 0\t // ptr" %}
13430 
13431   ins_encode(aarch64_enc_testp(op1));
13432 
13433   ins_pipe(icmp_reg_imm);
13434 %}
13435 
13436 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
13437 %{
13438   match(Set cr (CmpN op1 zero));
13439 
13440   effect(DEF cr, USE op1, USE zero);
13441 
13442   ins_cost(INSN_COST);
13443   format %{ "cmp  $op1, 0\t // compressed ptr" %}
13444 
13445   ins_encode(aarch64_enc_testn(op1));
13446 
13447   ins_pipe(icmp_reg_imm);
13448 %}
13449 
13450 // FP comparisons
13451 //
13452 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13453 // using normal cmpOp. See declaration of rFlagsReg for details.
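//
// The three-way CmpF3/CmpD3 rules below compute a -1/0/+1 result with
// two conditional ops: csinv yields 0 for EQ and -1 otherwise, then
// csneg flips the -1 to +1 unless the compare was LT. After an fcmp an
// unordered pair also satisfies LT, so NaN inputs fall out as -1.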
13454 
13455 instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
13456 %{
13457   match(Set cr (CmpF src1 src2));
13458 
13459   ins_cost(3 * INSN_COST);
13460   format %{ "fcmps $src1, $src2" %}
13461 
13462   ins_encode %{
13463     __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
13464   %}
13465 
13466   ins_pipe(pipe_class_compare);
13467 %}
13468 
13469 instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
13470 %{
13471   match(Set cr (CmpF src1 src2));
13472 
13473   ins_cost(3 * INSN_COST);
13474   format %{ "fcmps $src1, 0.0" %}
13475 
13476   ins_encode %{
13477     __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
13478   %}
13479 
13480   ins_pipe(pipe_class_compare);
13481 %}
13483 
13484 instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
13485 %{
13486   match(Set cr (CmpD src1 src2));
13487 
13488   ins_cost(3 * INSN_COST);
13489   format %{ "fcmpd $src1, $src2" %}
13490 
13491   ins_encode %{
13492     __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
13493   %}
13494 
13495   ins_pipe(pipe_class_compare);
13496 %}
13497 
13498 instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
13499 %{
13500   match(Set cr (CmpD src1 src2));
13501 
13502   ins_cost(3 * INSN_COST);
13503   format %{ "fcmpd $src1, 0.0" %}
13504 
13505   ins_encode %{
13506     __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
13507   %}
13508 
13509   ins_pipe(pipe_class_compare);
13510 %}
13511 
13512 instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
13513 %{
13514   match(Set dst (CmpF3 src1 src2));
13515   effect(KILL cr);
13516 
13517   ins_cost(5 * INSN_COST);
13518   format %{ "fcmps $src1, $src2\n\t"
13519             "csinvw($dst, zr, zr, eq\n\t"
13520             "csnegw($dst, $dst, $dst, lt)"
13521   %}
13522 
13523   ins_encode %{
13524     Label done;
13525     FloatRegister s1 = as_FloatRegister($src1$$reg);
13526     FloatRegister s2 = as_FloatRegister($src2$$reg);
13527     Register d = as_Register($dst$$reg);
13528     __ fcmps(s1, s2);
13529     // installs 0 if EQ else -1
13530     __ csinvw(d, zr, zr, Assembler::EQ);
13531     // keeps -1 if less or unordered else installs 1
13532     __ csnegw(d, d, d, Assembler::LT);
13533     __ bind(done);
13534   %}
13535 
13536   ins_pipe(pipe_class_default);
13537 
13538 %}
13539 
13540 instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
13541 %{
13542   match(Set dst (CmpD3 src1 src2));
13543   effect(KILL cr);
13544 
13545   ins_cost(5 * INSN_COST);
13546   format %{ "fcmpd $src1, $src2\n\t"
13547             "csinvw($dst, zr, zr, eq\n\t"
13548             "csnegw($dst, $dst, $dst, lt)"
13549   %}
13550 
13551   ins_encode %{
13552     Label done;
13553     FloatRegister s1 = as_FloatRegister($src1$$reg);
13554     FloatRegister s2 = as_FloatRegister($src2$$reg);
13555     Register d = as_Register($dst$$reg);
13556     __ fcmpd(s1, s2);
13557     // installs 0 if EQ else -1
13558     __ csinvw(d, zr, zr, Assembler::EQ);
13559     // keeps -1 if less or unordered else installs 1
13560     __ csnegw(d, d, d, Assembler::LT);
13561     __ bind(done);
13562   %}
13563   ins_pipe(pipe_class_default);
13564 
13565 %}
13566 
13567 instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
13568 %{
13569   match(Set dst (CmpF3 src1 zero));
13570   effect(KILL cr);
13571 
13572   ins_cost(5 * INSN_COST);
13573   format %{ "fcmps $src1, 0.0\n\t"
13574             "csinvw($dst, zr, zr, eq\n\t"
13575             "csnegw($dst, $dst, $dst, lt)"
13576   %}
13577 
13578   ins_encode %{
13579     Label done;
13580     FloatRegister s1 = as_FloatRegister($src1$$reg);
13581     Register d = as_Register($dst$$reg);
13582     __ fcmps(s1, 0.0D);
13583     // installs 0 if EQ else -1
13584     __ csinvw(d, zr, zr, Assembler::EQ);
13585     // keeps -1 if less or unordered else installs 1
13586     __ csnegw(d, d, d, Assembler::LT);
13587     __ bind(done);
13588   %}
13589 
13590   ins_pipe(pipe_class_default);
13591 
13592 %}
13593 
13594 instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
13595 %{
13596   match(Set dst (CmpD3 src1 zero));
13597   effect(KILL cr);
13598 
13599   ins_cost(5 * INSN_COST);
13600   format %{ "fcmpd $src1, 0.0\n\t"
13601             "csinvw($dst, zr, zr, eq\n\t"
13602             "csnegw($dst, $dst, $dst, lt)"
13603   %}
13604 
13605   ins_encode %{
13606     Label done;
13607     FloatRegister s1 = as_FloatRegister($src1$$reg);
13608     Register d = as_Register($dst$$reg);
13609     __ fcmpd(s1, 0.0D);
13610     // installs 0 if EQ else -1
13611     __ csinvw(d, zr, zr, Assembler::EQ);
13612     // keeps -1 if less or unordered else installs 1
13613     __ csnegw(d, d, d, Assembler::LT);
13614     __ bind(done);
13615   %}
13616   ins_pipe(pipe_class_default);
13617 
13618 %}
13619 
13620 instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
13621 %{
13622   match(Set dst (CmpLTMask p q));
13623   effect(KILL cr);
13624 
13625   ins_cost(3 * INSN_COST);
13626 
13627   format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
13628             "csetw $dst, lt\n\t"
13629             "subw $dst, zr, $dst"
13630   %}
13631 
13632   ins_encode %{
13633     __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
13634     __ csetw(as_Register($dst$$reg), Assembler::LT);
13635     __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
13636   %}
13637 
13638   ins_pipe(ialu_reg_reg);
13639 %}
13640 
13641 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
13642 %{
13643   match(Set dst (CmpLTMask src zero));
13644   effect(KILL cr);
13645 
13646   ins_cost(INSN_COST);
13647 
13648   format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}
13649 
13650   ins_encode %{
13651     __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
13652   %}
13653 
13654   ins_pipe(ialu_reg_shift);
13655 %}
13656 
13657 // ============================================================================
13658 // Max and Min
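//
// MinI and MaxI are branch-free: a compare followed by a conditional
// select, so each costs two ALU ops and can never mispredict.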
13659 
13660 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13661 %{
13662   match(Set dst (MinI src1 src2));
13663 
13664   effect(DEF dst, USE src1, USE src2, KILL cr);
13665   size(8);
13666 
13667   ins_cost(INSN_COST * 3);
13668   format %{
13669     "cmpw $src1 $src2\t signed int\n\t"
13670     "cselw $dst, $src1, $src2 lt\t"
13671   %}
13672 
13673   ins_encode %{
13674     __ cmpw(as_Register($src1$$reg),
13675             as_Register($src2$$reg));
13676     __ cselw(as_Register($dst$$reg),
13677              as_Register($src1$$reg),
13678              as_Register($src2$$reg),
13679              Assembler::LT);
13680   %}
13681 
13682   ins_pipe(ialu_reg_reg);
13683 %}
13685 
13686 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13687 %{
13688   match(Set dst (MaxI src1 src2));
13689 
13690   effect(DEF dst, USE src1, USE src2, KILL cr);
13691   size(8);
13692 
13693   ins_cost(INSN_COST * 3);
13694   format %{
13695     "cmpw $src1 $src2\t signed int\n\t"
13696     "cselw $dst, $src1, $src2 gt\t"
13697   %}
13698 
13699   ins_encode %{
13700     __ cmpw(as_Register($src1$$reg),
13701             as_Register($src2$$reg));
13702     __ cselw(as_Register($dst$$reg),
13703              as_Register($src1$$reg),
13704              as_Register($src2$$reg),
13705              Assembler::GT);
13706   %}
13707 
13708   ins_pipe(ialu_reg_reg);
13709 %}
13710 
13711 // ============================================================================
13712 // Branch Instructions
13713 
13714 // Direct Branch.
13715 instruct branch(label lbl)
13716 %{
13717   match(Goto);
13718 
13719   effect(USE lbl);
13720 
13721   ins_cost(BRANCH_COST);
13722   format %{ "b  $lbl" %}
13723 
13724   ins_encode(aarch64_enc_b(lbl));
13725 
13726   ins_pipe(pipe_branch);
13727 %}
13728 
13729 // Conditional Near Branch
13730 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
13731 %{
13732   // Same match rule as `branchConFar'.
13733   match(If cmp cr);
13734 
13735   effect(USE lbl);
13736 
13737   ins_cost(BRANCH_COST);
13738   // If set to 1 this indicates that the current instruction is a
13739   // short variant of a long branch. This avoids using this
13740   // instruction in first-pass matching. It will then only be used in
13741   // the `Shorten_branches' pass.
13742   // ins_short_branch(1);
13743   format %{ "b$cmp  $lbl" %}
13744 
13745   ins_encode(aarch64_enc_br_con(cmp, lbl));
13746 
13747   ins_pipe(pipe_branch_cond);
13748 %}
13749 
13750 // Conditional Near Branch Unsigned
13751 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
13752 %{
13753   // Same match rule as `branchConFar'.
13754   match(If cmp cr);
13755 
13756   effect(USE lbl);
13757 
13758   ins_cost(BRANCH_COST);
13759   // If set to 1 this indicates that the current instruction is a
13760   // short variant of a long branch. This avoids using this
13761   // instruction in first-pass matching. It will then only be used in
13762   // the `Shorten_branches' pass.
13763   // ins_short_branch(1);
13764   format %{ "b$cmp  $lbl\t# unsigned" %}
13765 
13766   ins_encode(aarch64_enc_br_conU(cmp, lbl));
13767 
13768   ins_pipe(pipe_branch_cond);
13769 %}
13770 
13771 // Make use of CBZ and CBNZ.  These instructions, as well as being
13772 // shorter than (cmp; branch), have the additional benefit of not
13773 // killing the flags.
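//
// For example (illustrative register and label names only), a test such
// as `if (x == 0) goto L' can be emitted as the single instruction
//
//   cbzw  w0, L
//
// instead of the flag-clobbering pair
//
//   cmpw  w0, #0
//   b.eq  L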
13774 
13775 instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
13776   match(If cmp (CmpI op1 op2));
13777   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13778             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13779   effect(USE labl);
13780 
13781   ins_cost(BRANCH_COST);
13782   format %{ "cbw$cmp   $op1, $labl" %}
13783   ins_encode %{
13784     Label* L = $labl$$label;
13785     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13786     if (cond == Assembler::EQ)
13787       __ cbzw($op1$$Register, *L);
13788     else
13789       __ cbnzw($op1$$Register, *L);
13790   %}
13791   ins_pipe(pipe_cmp_branch);
13792 %}
13793 
13794 instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
13795   match(If cmp (CmpL op1 op2));
13796   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13797             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13798   effect(USE labl);
13799 
13800   ins_cost(BRANCH_COST);
13801   format %{ "cb$cmp   $op1, $labl" %}
13802   ins_encode %{
13803     Label* L = $labl$$label;
13804     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13805     if (cond == Assembler::EQ)
13806       __ cbz($op1$$Register, *L);
13807     else
13808       __ cbnz($op1$$Register, *L);
13809   %}
13810   ins_pipe(pipe_cmp_branch);
13811 %}
13812 
13813 instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
13814   match(If cmp (CmpP op1 op2));
13815   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13816             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13817   effect(USE labl);
13818 
13819   ins_cost(BRANCH_COST);
13820   format %{ "cb$cmp   $op1, $labl" %}
13821   ins_encode %{
13822     Label* L = $labl$$label;
13823     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13824     if (cond == Assembler::EQ)
13825       __ cbz($op1$$Register, *L);
13826     else
13827       __ cbnz($op1$$Register, *L);
13828   %}
13829   ins_pipe(pipe_cmp_branch);
13830 %}
13831 
13832 instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
13833   match(If cmp (CmpP (DecodeN oop) zero));
13834   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13835             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13836   effect(USE labl);
13837 
13838   ins_cost(BRANCH_COST);
13839   format %{ "cb$cmp   $oop, $labl" %}
13840   ins_encode %{
13841     Label* L = $labl$$label;
13842     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13843     if (cond == Assembler::EQ)
13844       __ cbzw($oop$$Register, *L);
13845     else
13846       __ cbnzw($oop$$Register, *L);
13847   %}
13848   ins_pipe(pipe_cmp_branch);
13849 %}
13850 
13851 // Conditional Far Branch
13852 // Conditional Far Branch Unsigned
13853 // TODO: fixme
13854 
13855 // counted loop end branch near
13856 instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
13857 %{
13858   match(CountedLoopEnd cmp cr);
13859 
13860   effect(USE lbl);
13861 
13862   ins_cost(BRANCH_COST);
13863   // short variant.
13864   // ins_short_branch(1);
13865   format %{ "b$cmp $lbl \t// counted loop end" %}
13866 
13867   ins_encode(aarch64_enc_br_con(cmp, lbl));
13868 
13869   ins_pipe(pipe_branch);
13870 %}
13871 
13872 // counted loop end branch near Unsigned
13873 instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
13874 %{
13875   match(CountedLoopEnd cmp cr);
13876 
13877   effect(USE lbl);
13878 
13879   ins_cost(BRANCH_COST);
13880   // short variant.
13881   // ins_short_branch(1);
13882   format %{ "b$cmp $lbl \t// counted loop end unsigned" %}
13883 
13884   ins_encode(aarch64_enc_br_conU(cmp, lbl));
13885 
13886   ins_pipe(pipe_branch);
13887 %}
13888 
13889 // counted loop end branch far
13890 // counted loop end branch far unsigned
13891 // TODO: fixme
13892 
13893 // ============================================================================
13894 // inlined locking and unlocking
13895 
13896 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
13897 %{
13898   match(Set cr (FastLock object box));
13899   effect(TEMP tmp, TEMP tmp2);
13900 
13901   // TODO
13902   // identify correct cost
13903   ins_cost(5 * INSN_COST);
13904   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
13905 
13906   ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
13907 
13908   ins_pipe(pipe_serial);
13909 %}
13910 
13911 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
13912 %{
13913   match(Set cr (FastUnlock object box));
13914   effect(TEMP tmp, TEMP tmp2);
13915 
13916   ins_cost(5 * INSN_COST);
13917   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
13918 
13919   ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
13920 
13921   ins_pipe(pipe_serial);
13922 %}
13923 
13924 
13925 // ============================================================================
13926 // Safepoint Instructions
13927 
13928 // TODO
13929 // provide a near and far version of this code
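//
// The poll is a load from the VM's dedicated polling page.  When a
// safepoint is requested the VM protects that page, so the ldrw below
// faults and the signal handler brings the thread to a stop.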
13930 
13931 instruct safePoint(iRegP poll)
13932 %{
13933   match(SafePoint poll);
13934 
13935   format %{
13936     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
13937   %}
13938   ins_encode %{
13939     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
13940   %}
13941   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
13942 %}
13943 
13944 
13945 // ============================================================================
13946 // Procedure Call/Return Instructions
13947 
13948 // Call Java Static Instruction
13949 
13950 instruct CallStaticJavaDirect(method meth)
13951 %{
13952   match(CallStaticJava);
13953 
13954   effect(USE meth);
13955 
13956   ins_cost(CALL_COST);
13957 
13958   format %{ "call,static $meth \t// ==> " %}
13959 
13960   ins_encode( aarch64_enc_java_static_call(meth),
13961               aarch64_enc_call_epilog );
13962 
13963   ins_pipe(pipe_class_call);
13964 %}
13965 
13968 // Call Java Dynamic Instruction
13969 instruct CallDynamicJavaDirect(method meth)
13970 %{
13971   match(CallDynamicJava);
13972 
13973   effect(USE meth);
13974 
13975   ins_cost(CALL_COST);
13976 
13977   format %{ "CALL,dynamic $meth \t// ==> " %}
13978 
13979   ins_encode( aarch64_enc_java_dynamic_call(meth),
13980                aarch64_enc_call_epilog );
13981 
13982   ins_pipe(pipe_class_call);
13983 %}
13984 
13985 // Call Runtime Instruction
13986 
13987 instruct CallRuntimeDirect(method meth)
13988 %{
13989   match(CallRuntime);
13990 
13991   effect(USE meth);
13992 
13993   ins_cost(CALL_COST);
13994 
13995   format %{ "CALL, runtime $meth" %}
13996 
13997   ins_encode( aarch64_enc_java_to_runtime(meth) );
13998 
13999   ins_pipe(pipe_class_call);
14000 %}
14001 
14002 // Call Runtime Instruction
14003 
14004 instruct CallLeafDirect(method meth)
14005 %{
14006   match(CallLeaf);
14007 
14008   effect(USE meth);
14009 
14010   ins_cost(CALL_COST);
14011 
14012   format %{ "CALL, runtime leaf $meth" %}
14013 
14014   ins_encode( aarch64_enc_java_to_runtime(meth) );
14015 
14016   ins_pipe(pipe_class_call);
14017 %}
14018 
14019 // Call Runtime Instruction
14020 
14021 instruct CallLeafNoFPDirect(method meth)
14022 %{
14023   match(CallLeafNoFP);
14024 
14025   effect(USE meth);
14026 
14027   ins_cost(CALL_COST);
14028 
14029   format %{ "CALL, runtime leaf nofp $meth" %}
14030 
14031   ins_encode( aarch64_enc_java_to_runtime(meth) );
14032 
14033   ins_pipe(pipe_class_call);
14034 %}
14035 
14036 // Tail Call; Jump from runtime stub to Java code.
14037 // Also known as an 'interprocedural jump'.
14038 // Target of jump will eventually return to caller.
14039 // TailJump below removes the return address.
14040 instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
14041 %{
14042   match(TailCall jump_target method_oop);
14043 
14044   ins_cost(CALL_COST);
14045 
14046   format %{ "br $jump_target\t# $method_oop holds method oop" %}
14047 
14048   ins_encode(aarch64_enc_tail_call(jump_target));
14049 
14050   ins_pipe(pipe_class_call);
14051 %}
14052 
14053 instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
14054 %{
14055   match(TailJump jump_target ex_oop);
14056 
14057   ins_cost(CALL_COST);
14058 
14059   format %{ "br $jump_target\t# $ex_oop holds exception oop" %}
14060 
14061   ins_encode(aarch64_enc_tail_jmp(jump_target));
14062 
14063   ins_pipe(pipe_class_call);
14064 %}
14065 
14066 // Create exception oop: created by stack-crawling runtime code.
14067 // Created exception is now available to this handler, and is setup
14068 // just prior to jumping to this handler. No code emitted.
14069 // TODO check
14070 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
14071 instruct CreateException(iRegP_R0 ex_oop)
14072 %{
14073   match(Set ex_oop (CreateEx));
14074 
14075   format %{ " -- \t// exception oop; no code emitted" %}
14076 
14077   size(0);
14078 
14079   ins_encode( /*empty*/ );
14080 
14081   ins_pipe(pipe_class_empty);
14082 %}
14083 
14084 // Rethrow exception: The exception oop will come in the first
14085 // argument position. Then JUMP (not call) to the rethrow stub code.
14086 instruct RethrowException() %{
14087   match(Rethrow);
14088   ins_cost(CALL_COST);
14089 
14090   format %{ "b rethrow_stub" %}
14091 
14092   ins_encode( aarch64_enc_rethrow() );
14093 
14094   ins_pipe(pipe_class_call);
14095 %}
14096 
14097 
14098 // Return Instruction
14099 // epilog node loads ret address into lr as part of frame pop
14100 instruct Ret()
14101 %{
14102   match(Return);
14103 
14104   format %{ "ret\t// return register" %}
14105 
14106   ins_encode( aarch64_enc_ret() );
14107 
14108   ins_pipe(pipe_branch);
14109 %}
14110 
14111 // Die now.
14112 instruct ShouldNotReachHere() %{
14113   match(Halt);
14114 
14115   ins_cost(CALL_COST);
14116   format %{ "ShouldNotReachHere" %}
14117 
14118   ins_encode %{
14119     // TODO
14120     // implement proper trap call here
14121     __ brk(999);
14122   %}
14123 
14124   ins_pipe(pipe_class_default);
14125 %}
14126 
14127 // ============================================================================
14128 // Partial Subtype Check
14129 //
// Check whether the given superklass occurs in the secondary supers
// (superklass) array of an instance's klass.  Set a hidden internal
// cache on a hit (the cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.
14134 
14135 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14136 %{
14137   match(Set result (PartialSubtypeCheck sub super));
14138   effect(KILL cr, KILL temp);
14139 
  ins_cost(1100);  // relatively expensive: may loop over the secondary supers array
14141   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14142 
14143   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14144 
14145   opcode(0x1); // Force zero of result reg on hit
14146 
14147   ins_pipe(pipe_class_memory);
14148 %}
14149 
14150 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
14151 %{
14152   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
14153   effect(KILL temp, KILL result);
14154 
  ins_cost(1100);  // same cost as partialSubtypeCheck above
14156   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
14157 
14158   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14159 
14160   opcode(0x0); // Don't zero result reg on hit
14161 
14162   ins_pipe(pipe_class_memory);
14163 %}
14164 
14165 instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
14166                         iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
14167 %{
14168   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
14169   effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
14170 
14171   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
14172   ins_encode %{
14173     __ string_compare($str1$$Register, $str2$$Register,
14174                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
14175                       $tmp1$$Register);
14176   %}
14177   ins_pipe(pipe_class_memory);
14178 %}
14179 
14180 instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
14181        iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
14182 %{
14183   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
14184   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
14185          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
14186   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}
14187 
14188   ins_encode %{
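    // the trailing -1 tells the stub that the substring length is not a
    // compile-time constant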
14189     __ string_indexof($str1$$Register, $str2$$Register,
14190                       $cnt1$$Register, $cnt2$$Register,
14191                       $tmp1$$Register, $tmp2$$Register,
14192                       $tmp3$$Register, $tmp4$$Register,
14193                       -1, $result$$Register);
14194   %}
14195   ins_pipe(pipe_class_memory);
14196 %}
14197 
14198 instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
14199                  immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
14200                  iRegI tmp3, iRegI tmp4, rFlagsReg cr)
14201 %{
14202   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
14203   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
14204          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
14205   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}
14206 
14207   ins_encode %{
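    // the substring length is a small compile-time constant (immI_le_4),
    // so it is passed as an immediate and zr stands in for the cnt2 register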
14208     int icnt2 = (int)$int_cnt2$$constant;
14209     __ string_indexof($str1$$Register, $str2$$Register,
14210                       $cnt1$$Register, zr,
14211                       $tmp1$$Register, $tmp2$$Register,
14212                       $tmp3$$Register, $tmp4$$Register,
14213                       icnt2, $result$$Register);
14214   %}
14215   ins_pipe(pipe_class_memory);
14216 %}
14217 
14218 instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
14219                         iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
14220 %{
14221   match(Set result (StrEquals (Binary str1 str2) cnt));
14222   effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
14223 
14224   format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
14225   ins_encode %{
14226     __ string_equals($str1$$Register, $str2$$Register,
14227                       $cnt$$Register, $result$$Register,
14228                       $tmp$$Register);
14229   %}
14230   ins_pipe(pipe_class_memory);
14231 %}
14232 
14233 instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
14234                       iRegP_R10 tmp, rFlagsReg cr)
14235 %{
14236   match(Set result (AryEq ary1 ary2));
14237   effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);
14238 
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
14240   ins_encode %{
14241     __ char_arrays_equals($ary1$$Register, $ary2$$Register,
14242                           $result$$Register, $tmp$$Register);
14243   %}
14244   ins_pipe(pipe_class_memory);
14245 %}
14246 
14247 // encode char[] to byte[] in ISO_8859_1
14248 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
14249                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
14250                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
14251                           iRegI_R0 result, rFlagsReg cr)
14252 %{
14253   match(Set result (EncodeISOArray src (Binary dst len)));
14254   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
14255          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
14256 
14257   format %{ "Encode array $src,$dst,$len -> $result" %}
14258   ins_encode %{
14259     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
14260          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
14261          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
14262   %}
14263   ins_pipe( pipe_class_memory );
14264 %}
14265 
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this node.
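// No code is needed: on AArch64 the dedicated register r28 (rthread)
// always holds Thread::current(), so ThreadLocal is a no-op here.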
14270 instruct tlsLoadP(thread_RegP dst)
14271 %{
14272   match(Set dst (ThreadLocal));
14273 
14274   ins_cost(0);
14275 
14276   format %{ " -- \t// $dst=Thread::current(), empty" %}
14277 
14278   size(0);
14279 
14280   ins_encode( /*empty*/ );
14281 
14282   ins_pipe(pipe_class_empty);
14283 %}
14284 
14285 // ====================VECTOR INSTRUCTIONS=====================================
14286 
14287 // Load vector (32 bits)
14288 instruct loadV4(vecD dst, vmem mem)
14289 %{
14290   predicate(n->as_LoadVector()->memory_size() == 4);
14291   match(Set dst (LoadVector mem));
14292   ins_cost(4 * INSN_COST);
14293   format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
14294   ins_encode( aarch64_enc_ldrvS(dst, mem) );
14295   ins_pipe(pipe_class_memory);
14296 %}
14297 
14298 // Load vector (64 bits)
14299 instruct loadV8(vecD dst, vmem mem)
14300 %{
14301   predicate(n->as_LoadVector()->memory_size() == 8);
14302   match(Set dst (LoadVector mem));
14303   ins_cost(4 * INSN_COST);
14304   format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
14305   ins_encode( aarch64_enc_ldrvD(dst, mem) );
14306   ins_pipe(pipe_class_memory);
14307 %}
14308 
14309 // Load Vector (128 bits)
14310 instruct loadV16(vecX dst, vmem mem)
14311 %{
14312   predicate(n->as_LoadVector()->memory_size() == 16);
14313   match(Set dst (LoadVector mem));
14314   ins_cost(4 * INSN_COST);
14315   format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
14316   ins_encode( aarch64_enc_ldrvQ(dst, mem) );
14317   ins_pipe(pipe_class_memory);
14318 %}
14319 
14320 // Store Vector (32 bits)
14321 instruct storeV4(vecD src, vmem mem)
14322 %{
14323   predicate(n->as_StoreVector()->memory_size() == 4);
14324   match(Set mem (StoreVector mem src));
14325   ins_cost(4 * INSN_COST);
14326   format %{ "strs   $mem,$src\t# vector (32 bits)" %}
14327   ins_encode( aarch64_enc_strvS(src, mem) );
14328   ins_pipe(pipe_class_memory);
14329 %}
14330 
14331 // Store Vector (64 bits)
14332 instruct storeV8(vecD src, vmem mem)
14333 %{
14334   predicate(n->as_StoreVector()->memory_size() == 8);
14335   match(Set mem (StoreVector mem src));
14336   ins_cost(4 * INSN_COST);
14337   format %{ "strd   $mem,$src\t# vector (64 bits)" %}
14338   ins_encode( aarch64_enc_strvD(src, mem) );
14339   ins_pipe(pipe_class_memory);
14340 %}
14341 
14342 // Store Vector (128 bits)
14343 instruct storeV16(vecX src, vmem mem)
14344 %{
14345   predicate(n->as_StoreVector()->memory_size() == 16);
14346   match(Set mem (StoreVector mem src));
14347   ins_cost(4 * INSN_COST);
14348   format %{ "strq   $mem,$src\t# vector (128 bits)" %}
14349   ins_encode( aarch64_enc_strvQ(src, mem) );
14350   ins_pipe(pipe_class_memory);
14351 %}
14352 
14353 instruct replicate8B(vecD dst, iRegIorL2I src)
14354 %{
14355   predicate(n->as_Vector()->length() == 4 ||
14356             n->as_Vector()->length() == 8);
14357   match(Set dst (ReplicateB src));
14358   ins_cost(INSN_COST);
14359   format %{ "dup  $dst, $src\t# vector (8B)" %}
14360   ins_encode %{
14361     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
14362   %}
14363   ins_pipe(pipe_class_default);
14364 %}
14365 
14366 instruct replicate16B(vecX dst, iRegIorL2I src)
14367 %{
14368   predicate(n->as_Vector()->length() == 16);
14369   match(Set dst (ReplicateB src));
14370   ins_cost(INSN_COST);
14371   format %{ "dup  $dst, $src\t# vector (16B)" %}
14372   ins_encode %{
14373     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
14374   %}
14375   ins_pipe(pipe_class_default);
14376 %}
14377 
14378 instruct replicate8B_imm(vecD dst, immI con)
14379 %{
14380   predicate(n->as_Vector()->length() == 4 ||
14381             n->as_Vector()->length() == 8);
14382   match(Set dst (ReplicateB con));
14383   ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector (8B)" %}
14385   ins_encode %{
14386     __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
14387   %}
14388   ins_pipe(pipe_class_default);
14389 %}
14390 
14391 instruct replicate16B_imm(vecX dst, immI con)
14392 %{
14393   predicate(n->as_Vector()->length() == 16);
14394   match(Set dst (ReplicateB con));
14395   ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector (16B)" %}
14397   ins_encode %{
14398     __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
14399   %}
14400   ins_pipe(pipe_class_default);
14401 %}
14402 
14403 instruct replicate4S(vecD dst, iRegIorL2I src)
14404 %{
14405   predicate(n->as_Vector()->length() == 2 ||
14406             n->as_Vector()->length() == 4);
14407   match(Set dst (ReplicateS src));
14408   ins_cost(INSN_COST);
14409   format %{ "dup  $dst, $src\t# vector (4S)" %}
14410   ins_encode %{
14411     __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
14412   %}
14413   ins_pipe(pipe_class_default);
14414 %}
14415 
14416 instruct replicate8S(vecX dst, iRegIorL2I src)
14417 %{
14418   predicate(n->as_Vector()->length() == 8);
14419   match(Set dst (ReplicateS src));
14420   ins_cost(INSN_COST);
14421   format %{ "dup  $dst, $src\t# vector (8S)" %}
14422   ins_encode %{
14423     __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
14424   %}
14425   ins_pipe(pipe_class_default);
14426 %}
14427 
14428 instruct replicate4S_imm(vecD dst, immI con)
14429 %{
14430   predicate(n->as_Vector()->length() == 2 ||
14431             n->as_Vector()->length() == 4);
14432   match(Set dst (ReplicateS con));
14433   ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector (4H)" %}
14435   ins_encode %{
14436     __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
14437   %}
14438   ins_pipe(pipe_class_default);
14439 %}
14440 
14441 instruct replicate8S_imm(vecX dst, immI con)
14442 %{
14443   predicate(n->as_Vector()->length() == 8);
14444   match(Set dst (ReplicateS con));
14445   ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector (8H)" %}
14447   ins_encode %{
14448     __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
14449   %}
14450   ins_pipe(pipe_class_default);
14451 %}
14452 
14453 instruct replicate2I(vecD dst, iRegIorL2I src)
14454 %{
14455   predicate(n->as_Vector()->length() == 2);
14456   match(Set dst (ReplicateI src));
14457   ins_cost(INSN_COST);
14458   format %{ "dup  $dst, $src\t# vector (2I)" %}
14459   ins_encode %{
14460     __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
14461   %}
14462   ins_pipe(pipe_class_default);
14463 %}
14464 
14465 instruct replicate4I(vecX dst, iRegIorL2I src)
14466 %{
14467   predicate(n->as_Vector()->length() == 4);
14468   match(Set dst (ReplicateI src));
14469   ins_cost(INSN_COST);
14470   format %{ "dup  $dst, $src\t# vector (4I)" %}
14471   ins_encode %{
14472     __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
14473   %}
14474   ins_pipe(pipe_class_default);
14475 %}
14476 
14477 instruct replicate2I_imm(vecD dst, immI con)
14478 %{
14479   predicate(n->as_Vector()->length() == 2);
14480   match(Set dst (ReplicateI con));
14481   ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector (2I)" %}
14483   ins_encode %{
14484     __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
14485   %}
14486   ins_pipe(pipe_class_default);
14487 %}
14488 
14489 instruct replicate4I_imm(vecX dst, immI con)
14490 %{
14491   predicate(n->as_Vector()->length() == 4);
14492   match(Set dst (ReplicateI con));
14493   ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector (4I)" %}
14495   ins_encode %{
14496     __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
14497   %}
14498   ins_pipe(pipe_class_default);
14499 %}
14500 
14501 instruct replicate2L(vecX dst, iRegL src)
14502 %{
14503   predicate(n->as_Vector()->length() == 2);
14504   match(Set dst (ReplicateL src));
14505   ins_cost(INSN_COST);
14506   format %{ "dup  $dst, $src\t# vector (2L)" %}
14507   ins_encode %{
14508     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
14509   %}
14510   ins_pipe(pipe_class_default);
14511 %}
14512 
14513 instruct replicate2L_zero(vecX dst, immI0 zero)
14514 %{
14515   predicate(n->as_Vector()->length() == 2);
14516   match(Set dst (ReplicateI zero));
14517   ins_cost(INSN_COST);
  format %{ "eor  $dst, $dst, $dst\t# vector (4I) zero" %}
14519   ins_encode %{
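    // eor of a register with itself zeroes all 128 bits without needing
    // an immediate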
14520     __ eor(as_FloatRegister($dst$$reg), __ T16B,
14521            as_FloatRegister($dst$$reg),
14522            as_FloatRegister($dst$$reg));
14523   %}
14524   ins_pipe(pipe_class_default);
14525 %}
14526 
14527 instruct replicate2F(vecD dst, vRegF src)
14528 %{
14529   predicate(n->as_Vector()->length() == 2);
14530   match(Set dst (ReplicateF src));
14531   ins_cost(INSN_COST);
14532   format %{ "dup  $dst, $src\t# vector (2F)" %}
14533   ins_encode %{
14534     __ dup(as_FloatRegister($dst$$reg), __ T2S,
14535            as_FloatRegister($src$$reg));
14536   %}
14537   ins_pipe(pipe_class_default);
14538 %}
14539 
14540 instruct replicate4F(vecX dst, vRegF src)
14541 %{
14542   predicate(n->as_Vector()->length() == 4);
14543   match(Set dst (ReplicateF src));
14544   ins_cost(INSN_COST);
14545   format %{ "dup  $dst, $src\t# vector (4F)" %}
14546   ins_encode %{
14547     __ dup(as_FloatRegister($dst$$reg), __ T4S,
14548            as_FloatRegister($src$$reg));
14549   %}
14550   ins_pipe(pipe_class_default);
14551 %}
14552 
14553 instruct replicate2D(vecX dst, vRegD src)
14554 %{
14555   predicate(n->as_Vector()->length() == 2);
14556   match(Set dst (ReplicateD src));
14557   ins_cost(INSN_COST);
14558   format %{ "dup  $dst, $src\t# vector (2D)" %}
14559   ins_encode %{
14560     __ dup(as_FloatRegister($dst$$reg), __ T2D,
14561            as_FloatRegister($src$$reg));
14562   %}
14563   ins_pipe(pipe_class_default);
14564 %}
14565 
14566 // ====================REDUCTION ARITHMETIC====================================
14567 
14568 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
14569 %{
14570   match(Set dst (AddReductionVI src1 src2));
14571   ins_cost(INSN_COST);
14572   effect(TEMP tmp, TEMP tmp2);
14573   format %{ "umov  $tmp, $src2, S, 0\n\t"
14574             "umov  $tmp2, $src2, S, 1\n\t"
14575             "addw  $dst, $src1, $tmp\n\t"
14576             "addw  $dst, $dst, $tmp2\t add reduction2i"
14577   %}
14578   ins_encode %{
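    // umov moves each 32-bit lane of $src2 into a general register so the
    // reduction can finish with scalar adds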
14579     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
14580     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
14581     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
14582     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
14583   %}
14584   ins_pipe(pipe_class_default);
14585 %}
14586 
14587 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
14588 %{
14589   match(Set dst (AddReductionVI src1 src2));
14590   ins_cost(INSN_COST);
14591   effect(TEMP tmp, TEMP tmp2);
14592   format %{ "addv  $tmp, T4S, $src2\n\t"
14593             "umov  $tmp2, $tmp, S, 0\n\t"
14594             "addw  $dst, $tmp2, $src1\t add reduction4i"
14595   %}
14596   ins_encode %{
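    // addv sums all four S lanes of $src2 into lane 0 of $tmp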
14597     __ addv(as_FloatRegister($tmp$$reg), __ T4S,
14598             as_FloatRegister($src2$$reg));
14599     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
14600     __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
14601   %}
14602   ins_pipe(pipe_class_default);
14603 %}
14604 
14605 instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
14606 %{
14607   match(Set dst (MulReductionVI src1 src2));
14608   ins_cost(INSN_COST);
14609   effect(TEMP tmp, TEMP dst);
14610   format %{ "umov  $tmp, $src2, S, 0\n\t"
14611             "mul   $dst, $tmp, $src1\n\t"
14612             "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
14614   %}
14615   ins_encode %{
14616     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
14617     __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
14618     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
14619     __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
14620   %}
14621   ins_pipe(pipe_class_default);
14622 %}
14623 
14624 instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
14625 %{
14626   match(Set dst (MulReductionVI src1 src2));
14627   ins_cost(INSN_COST);
14628   effect(TEMP tmp, TEMP tmp2, TEMP dst);
14629   format %{ "ins   $tmp, $src2, 0, 1\n\t"
14630             "mul   $tmp, $tmp, $src2\n\t"
14631             "umov  $tmp2, $tmp, S, 0\n\t"
14632             "mul   $dst, $tmp2, $src1\n\t"
14633             "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
14635   %}
14636   ins_encode %{
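    // move the upper 64 bits of $src2 down into $tmp; the 2S multiply
    // then multiplies lanes {0,1} by lanes {2,3}, and the two partial
    // products are folded into $dst with scalar multiplies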
14637     __ ins(as_FloatRegister($tmp$$reg), __ D,
14638            as_FloatRegister($src2$$reg), 0, 1);
14639     __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
14640            as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
14641     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
14642     __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
14643     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
14644     __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
14645   %}
14646   ins_pipe(pipe_class_default);
14647 %}
14648 
14649 instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
14650 %{
14651   match(Set dst (AddReductionVF src1 src2));
14652   ins_cost(INSN_COST);
14653   effect(TEMP tmp, TEMP dst);
14654   format %{ "fadds $dst, $src1, $src2\n\t"
14655             "ins   $tmp, S, $src2, 0, 1\n\t"
14656             "fadds $dst, $dst, $tmp\t add reduction2f"
14657   %}
14658   ins_encode %{
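    // ins copies lane 1 of $src2 into lane 0 of $tmp so the second
    // element can be added as a scalar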
14659     __ fadds(as_FloatRegister($dst$$reg),
14660              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14661     __ ins(as_FloatRegister($tmp$$reg), __ S,
14662            as_FloatRegister($src2$$reg), 0, 1);
14663     __ fadds(as_FloatRegister($dst$$reg),
14664              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14665   %}
14666   ins_pipe(pipe_class_default);
14667 %}
14668 
14669 instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
14670 %{
14671   match(Set dst (AddReductionVF src1 src2));
14672   ins_cost(INSN_COST);
14673   effect(TEMP tmp, TEMP dst);
14674   format %{ "fadds $dst, $src1, $src2\n\t"
14675             "ins   $tmp, S, $src2, 0, 1\n\t"
14676             "fadds $dst, $dst, $tmp\n\t"
14677             "ins   $tmp, S, $src2, 0, 2\n\t"
14678             "fadds $dst, $dst, $tmp\n\t"
14679             "ins   $tmp, S, $src2, 0, 3\n\t"
14680             "fadds $dst, $dst, $tmp\t add reduction4f"
14681   %}
14682   ins_encode %{
14683     __ fadds(as_FloatRegister($dst$$reg),
14684              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14685     __ ins(as_FloatRegister($tmp$$reg), __ S,
14686            as_FloatRegister($src2$$reg), 0, 1);
14687     __ fadds(as_FloatRegister($dst$$reg),
14688              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14689     __ ins(as_FloatRegister($tmp$$reg), __ S,
14690            as_FloatRegister($src2$$reg), 0, 2);
14691     __ fadds(as_FloatRegister($dst$$reg),
14692              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14693     __ ins(as_FloatRegister($tmp$$reg), __ S,
14694            as_FloatRegister($src2$$reg), 0, 3);
14695     __ fadds(as_FloatRegister($dst$$reg),
14696              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14697   %}
14698   ins_pipe(pipe_class_default);
14699 %}
14700 
14701 instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
14702 %{
14703   match(Set dst (MulReductionVF src1 src2));
14704   ins_cost(INSN_COST);
14705   effect(TEMP tmp, TEMP dst);
14706   format %{ "fmuls $dst, $src1, $src2\n\t"
14707             "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
14709   %}
14710   ins_encode %{
14711     __ fmuls(as_FloatRegister($dst$$reg),
14712              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14713     __ ins(as_FloatRegister($tmp$$reg), __ S,
14714            as_FloatRegister($src2$$reg), 0, 1);
14715     __ fmuls(as_FloatRegister($dst$$reg),
14716              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14717   %}
14718   ins_pipe(pipe_class_default);
14719 %}
14720 
14721 instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
14722 %{
14723   match(Set dst (MulReductionVF src1 src2));
14724   ins_cost(INSN_COST);
14725   effect(TEMP tmp, TEMP dst);
14726   format %{ "fmuls $dst, $src1, $src2\n\t"
14727             "ins   $tmp, S, $src2, 0, 1\n\t"
14728             "fmuls $dst, $dst, $tmp\n\t"
14729             "ins   $tmp, S, $src2, 0, 2\n\t"
14730             "fmuls $dst, $dst, $tmp\n\t"
14731             "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
14733   %}
14734   ins_encode %{
14735     __ fmuls(as_FloatRegister($dst$$reg),
14736              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14737     __ ins(as_FloatRegister($tmp$$reg), __ S,
14738            as_FloatRegister($src2$$reg), 0, 1);
14739     __ fmuls(as_FloatRegister($dst$$reg),
14740              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14741     __ ins(as_FloatRegister($tmp$$reg), __ S,
14742            as_FloatRegister($src2$$reg), 0, 2);
14743     __ fmuls(as_FloatRegister($dst$$reg),
14744              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14745     __ ins(as_FloatRegister($tmp$$reg), __ S,
14746            as_FloatRegister($src2$$reg), 0, 3);
14747     __ fmuls(as_FloatRegister($dst$$reg),
14748              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14749   %}
14750   ins_pipe(pipe_class_default);
14751 %}
14752 
14753 instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
14754 %{
14755   match(Set dst (AddReductionVD src1 src2));
14756   ins_cost(INSN_COST);
14757   effect(TEMP tmp, TEMP dst);
14758   format %{ "faddd $dst, $src1, $src2\n\t"
14759             "ins   $tmp, D, $src2, 0, 1\n\t"
14760             "faddd $dst, $dst, $tmp\t add reduction2d"
14761   %}
14762   ins_encode %{
14763     __ faddd(as_FloatRegister($dst$$reg),
14764              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14765     __ ins(as_FloatRegister($tmp$$reg), __ D,
14766            as_FloatRegister($src2$$reg), 0, 1);
14767     __ faddd(as_FloatRegister($dst$$reg),
14768              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14769   %}
14770   ins_pipe(pipe_class_default);
14771 %}
14772 
14773 instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
14774 %{
14775   match(Set dst (MulReductionVD src1 src2));
14776   ins_cost(INSN_COST);
14777   effect(TEMP tmp, TEMP dst);
14778   format %{ "fmuld $dst, $src1, $src2\n\t"
14779             "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
14781   %}
14782   ins_encode %{
14783     __ fmuld(as_FloatRegister($dst$$reg),
14784              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14785     __ ins(as_FloatRegister($tmp$$reg), __ D,
14786            as_FloatRegister($src2$$reg), 0, 1);
14787     __ fmuld(as_FloatRegister($dst$$reg),
14788              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14789   %}
14790   ins_pipe(pipe_class_default);
14791 %}
14792 
14793 // ====================VECTOR ARITHMETIC=======================================
14794 
14795 // --------------------------------- ADD --------------------------------------
14796 
14797 instruct vadd8B(vecD dst, vecD src1, vecD src2)
14798 %{
14799   predicate(n->as_Vector()->length() == 4 ||
14800             n->as_Vector()->length() == 8);
14801   match(Set dst (AddVB src1 src2));
14802   ins_cost(INSN_COST);
14803   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
14804   ins_encode %{
14805     __ addv(as_FloatRegister($dst$$reg), __ T8B,
14806             as_FloatRegister($src1$$reg),
14807             as_FloatRegister($src2$$reg));
14808   %}
14809   ins_pipe(pipe_class_default);
14810 %}
14811 
14812 instruct vadd16B(vecX dst, vecX src1, vecX src2)
14813 %{
14814   predicate(n->as_Vector()->length() == 16);
14815   match(Set dst (AddVB src1 src2));
14816   ins_cost(INSN_COST);
14817   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
14818   ins_encode %{
14819     __ addv(as_FloatRegister($dst$$reg), __ T16B,
14820             as_FloatRegister($src1$$reg),
14821             as_FloatRegister($src2$$reg));
14822   %}
14823   ins_pipe(pipe_class_default);
14824 %}
14825 
14826 instruct vadd4S(vecD dst, vecD src1, vecD src2)
14827 %{
14828   predicate(n->as_Vector()->length() == 2 ||
14829             n->as_Vector()->length() == 4);
14830   match(Set dst (AddVS src1 src2));
14831   ins_cost(INSN_COST);
14832   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
14833   ins_encode %{
14834     __ addv(as_FloatRegister($dst$$reg), __ T4H,
14835             as_FloatRegister($src1$$reg),
14836             as_FloatRegister($src2$$reg));
14837   %}
14838   ins_pipe(pipe_class_default);
14839 %}
14840 
14841 instruct vadd8S(vecX dst, vecX src1, vecX src2)
14842 %{
14843   predicate(n->as_Vector()->length() == 8);
14844   match(Set dst (AddVS src1 src2));
14845   ins_cost(INSN_COST);
14846   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
14847   ins_encode %{
14848     __ addv(as_FloatRegister($dst$$reg), __ T8H,
14849             as_FloatRegister($src1$$reg),
14850             as_FloatRegister($src2$$reg));
14851   %}
14852   ins_pipe(pipe_class_default);
14853 %}
14854 
14855 instruct vadd2I(vecD dst, vecD src1, vecD src2)
14856 %{
14857   predicate(n->as_Vector()->length() == 2);
14858   match(Set dst (AddVI src1 src2));
14859   ins_cost(INSN_COST);
14860   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
14861   ins_encode %{
14862     __ addv(as_FloatRegister($dst$$reg), __ T2S,
14863             as_FloatRegister($src1$$reg),
14864             as_FloatRegister($src2$$reg));
14865   %}
14866   ins_pipe(pipe_class_default);
14867 %}
14868 
14869 instruct vadd4I(vecX dst, vecX src1, vecX src2)
14870 %{
14871   predicate(n->as_Vector()->length() == 4);
14872   match(Set dst (AddVI src1 src2));
14873   ins_cost(INSN_COST);
14874   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
14875   ins_encode %{
14876     __ addv(as_FloatRegister($dst$$reg), __ T4S,
14877             as_FloatRegister($src1$$reg),
14878             as_FloatRegister($src2$$reg));
14879   %}
14880   ins_pipe(pipe_class_default);
14881 %}
14882 
14883 instruct vadd2L(vecX dst, vecX src1, vecX src2)
14884 %{
14885   predicate(n->as_Vector()->length() == 2);
14886   match(Set dst (AddVL src1 src2));
14887   ins_cost(INSN_COST);
14888   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
14889   ins_encode %{
14890     __ addv(as_FloatRegister($dst$$reg), __ T2D,
14891             as_FloatRegister($src1$$reg),
14892             as_FloatRegister($src2$$reg));
14893   %}
14894   ins_pipe(pipe_class_default);
14895 %}
14896 
14897 instruct vadd2F(vecD dst, vecD src1, vecD src2)
14898 %{
14899   predicate(n->as_Vector()->length() == 2);
14900   match(Set dst (AddVF src1 src2));
14901   ins_cost(INSN_COST);
14902   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
14903   ins_encode %{
14904     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
14905             as_FloatRegister($src1$$reg),
14906             as_FloatRegister($src2$$reg));
14907   %}
14908   ins_pipe(pipe_class_default);
14909 %}
14910 
14911 instruct vadd4F(vecX dst, vecX src1, vecX src2)
14912 %{
14913   predicate(n->as_Vector()->length() == 4);
14914   match(Set dst (AddVF src1 src2));
14915   ins_cost(INSN_COST);
14916   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
14917   ins_encode %{
14918     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
14919             as_FloatRegister($src1$$reg),
14920             as_FloatRegister($src2$$reg));
14921   %}
14922   ins_pipe(pipe_class_default);
14923 %}
14924 
14925 instruct vadd2D(vecX dst, vecX src1, vecX src2)
14926 %{
14927   match(Set dst (AddVD src1 src2));
14928   ins_cost(INSN_COST);
14929   format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
14930   ins_encode %{
14931     __ fadd(as_FloatRegister($dst$$reg), __ T2D,
14932             as_FloatRegister($src1$$reg),
14933             as_FloatRegister($src2$$reg));
14934   %}
14935   ins_pipe(pipe_class_default);
14936 %}
14937 
14938 // --------------------------------- SUB --------------------------------------
14939 
14940 instruct vsub8B(vecD dst, vecD src1, vecD src2)
14941 %{
14942   predicate(n->as_Vector()->length() == 4 ||
14943             n->as_Vector()->length() == 8);
14944   match(Set dst (SubVB src1 src2));
14945   ins_cost(INSN_COST);
14946   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
14947   ins_encode %{
14948     __ subv(as_FloatRegister($dst$$reg), __ T8B,
14949             as_FloatRegister($src1$$reg),
14950             as_FloatRegister($src2$$reg));
14951   %}
14952   ins_pipe(pipe_class_default);
14953 %}
14954 
14955 instruct vsub16B(vecX dst, vecX src1, vecX src2)
14956 %{
14957   predicate(n->as_Vector()->length() == 16);
14958   match(Set dst (SubVB src1 src2));
14959   ins_cost(INSN_COST);
14960   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
14961   ins_encode %{
14962     __ subv(as_FloatRegister($dst$$reg), __ T16B,
14963             as_FloatRegister($src1$$reg),
14964             as_FloatRegister($src2$$reg));
14965   %}
14966   ins_pipe(pipe_class_default);
14967 %}
14968 
14969 instruct vsub4S(vecD dst, vecD src1, vecD src2)
14970 %{
14971   predicate(n->as_Vector()->length() == 2 ||
14972             n->as_Vector()->length() == 4);
14973   match(Set dst (SubVS src1 src2));
14974   ins_cost(INSN_COST);
14975   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
14976   ins_encode %{
14977     __ subv(as_FloatRegister($dst$$reg), __ T4H,
14978             as_FloatRegister($src1$$reg),
14979             as_FloatRegister($src2$$reg));
14980   %}
14981   ins_pipe(pipe_class_default);
14982 %}
14983 
14984 instruct vsub8S(vecX dst, vecX src1, vecX src2)
14985 %{
14986   predicate(n->as_Vector()->length() == 8);
14987   match(Set dst (SubVS src1 src2));
14988   ins_cost(INSN_COST);
14989   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
14990   ins_encode %{
14991     __ subv(as_FloatRegister($dst$$reg), __ T8H,
14992             as_FloatRegister($src1$$reg),
14993             as_FloatRegister($src2$$reg));
14994   %}
14995   ins_pipe(pipe_class_default);
14996 %}
14997 
14998 instruct vsub2I(vecD dst, vecD src1, vecD src2)
14999 %{
15000   predicate(n->as_Vector()->length() == 2);
15001   match(Set dst (SubVI src1 src2));
15002   ins_cost(INSN_COST);
15003   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15004   ins_encode %{
15005     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15006             as_FloatRegister($src1$$reg),
15007             as_FloatRegister($src2$$reg));
15008   %}
15009   ins_pipe(pipe_class_default);
15010 %}
15011 
15012 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15013 %{
15014   predicate(n->as_Vector()->length() == 4);
15015   match(Set dst (SubVI src1 src2));
15016   ins_cost(INSN_COST);
15017   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15018   ins_encode %{
15019     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15020             as_FloatRegister($src1$$reg),
15021             as_FloatRegister($src2$$reg));
15022   %}
15023   ins_pipe(pipe_class_default);
15024 %}
15025 
15026 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15027 %{
15028   predicate(n->as_Vector()->length() == 2);
15029   match(Set dst (SubVL src1 src2));
15030   ins_cost(INSN_COST);
15031   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15032   ins_encode %{
15033     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15034             as_FloatRegister($src1$$reg),
15035             as_FloatRegister($src2$$reg));
15036   %}
15037   ins_pipe(pipe_class_default);
15038 %}
15039 
15040 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15041 %{
15042   predicate(n->as_Vector()->length() == 2);
15043   match(Set dst (SubVF src1 src2));
15044   ins_cost(INSN_COST);
15045   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15046   ins_encode %{
15047     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15048             as_FloatRegister($src1$$reg),
15049             as_FloatRegister($src2$$reg));
15050   %}
15051   ins_pipe(pipe_class_default);
15052 %}
15053 
15054 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15055 %{
15056   predicate(n->as_Vector()->length() == 4);
15057   match(Set dst (SubVF src1 src2));
15058   ins_cost(INSN_COST);
15059   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15060   ins_encode %{
15061     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15062             as_FloatRegister($src1$$reg),
15063             as_FloatRegister($src2$$reg));
15064   %}
15065   ins_pipe(pipe_class_default);
15066 %}
15067 
15068 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15069 %{
15070   predicate(n->as_Vector()->length() == 2);
15071   match(Set dst (SubVD src1 src2));
15072   ins_cost(INSN_COST);
15073   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15074   ins_encode %{
15075     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15076             as_FloatRegister($src1$$reg),
15077             as_FloatRegister($src2$$reg));
15078   %}
15079   ins_pipe(pipe_class_default);
15080 %}
15081 
15082 // --------------------------------- MUL --------------------------------------
15083 
15084 instruct vmul4S(vecD dst, vecD src1, vecD src2)
15085 %{
15086   predicate(n->as_Vector()->length() == 2 ||
15087             n->as_Vector()->length() == 4);
15088   match(Set dst (MulVS src1 src2));
15089   ins_cost(INSN_COST);
15090   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
15091   ins_encode %{
15092     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
15093             as_FloatRegister($src1$$reg),
15094             as_FloatRegister($src2$$reg));
15095   %}
15096   ins_pipe(pipe_class_default);
15097 %}
15098 
15099 instruct vmul8S(vecX dst, vecX src1, vecX src2)
15100 %{
15101   predicate(n->as_Vector()->length() == 8);
15102   match(Set dst (MulVS src1 src2));
15103   ins_cost(INSN_COST);
15104   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
15105   ins_encode %{
15106     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
15107             as_FloatRegister($src1$$reg),
15108             as_FloatRegister($src2$$reg));
15109   %}
15110   ins_pipe(pipe_class_default);
15111 %}
15112 
15113 instruct vmul2I(vecD dst, vecD src1, vecD src2)
15114 %{
15115   predicate(n->as_Vector()->length() == 2);
15116   match(Set dst (MulVI src1 src2));
15117   ins_cost(INSN_COST);
15118   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
15119   ins_encode %{
15120     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
15121             as_FloatRegister($src1$$reg),
15122             as_FloatRegister($src2$$reg));
15123   %}
15124   ins_pipe(pipe_class_default);
15125 %}
15126 
15127 instruct vmul4I(vecX dst, vecX src1, vecX src2)
15128 %{
15129   predicate(n->as_Vector()->length() == 4);
15130   match(Set dst (MulVI src1 src2));
15131   ins_cost(INSN_COST);
15132   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
15133   ins_encode %{
15134     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
15135             as_FloatRegister($src1$$reg),
15136             as_FloatRegister($src2$$reg));
15137   %}
15138   ins_pipe(pipe_class_default);
15139 %}
15140 
15141 instruct vmul2F(vecD dst, vecD src1, vecD src2)
15142 %{
15143   predicate(n->as_Vector()->length() == 2);
15144   match(Set dst (MulVF src1 src2));
15145   ins_cost(INSN_COST);
15146   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
15147   ins_encode %{
15148     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
15149             as_FloatRegister($src1$$reg),
15150             as_FloatRegister($src2$$reg));
15151   %}
15152   ins_pipe(pipe_class_default);
15153 %}
15154 
15155 instruct vmul4F(vecX dst, vecX src1, vecX src2)
15156 %{
15157   predicate(n->as_Vector()->length() == 4);
15158   match(Set dst (MulVF src1 src2));
15159   ins_cost(INSN_COST);
15160   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
15161   ins_encode %{
15162     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
15163             as_FloatRegister($src1$$reg),
15164             as_FloatRegister($src2$$reg));
15165   %}
15166   ins_pipe(pipe_class_default);
15167 %}
15168 
15169 instruct vmul2D(vecX dst, vecX src1, vecX src2)
15170 %{
15171   predicate(n->as_Vector()->length() == 2);
15172   match(Set dst (MulVD src1 src2));
15173   ins_cost(INSN_COST);
15174   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
15175   ins_encode %{
15176     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
15177             as_FloatRegister($src1$$reg),
15178             as_FloatRegister($src2$$reg));
15179   %}
15180   ins_pipe(pipe_class_default);
15181 %}
15182 
15183 // --------------------------------- DIV --------------------------------------
15184 
15185 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
15186 %{
15187   predicate(n->as_Vector()->length() == 2);
15188   match(Set dst (DivVF src1 src2));
15189   ins_cost(INSN_COST);
15190   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
15191   ins_encode %{
15192     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
15193             as_FloatRegister($src1$$reg),
15194             as_FloatRegister($src2$$reg));
15195   %}
15196   ins_pipe(pipe_class_default);
15197 %}
15198 
15199 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
15200 %{
15201   predicate(n->as_Vector()->length() == 4);
15202   match(Set dst (DivVF src1 src2));
15203   ins_cost(INSN_COST);
15204   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
15205   ins_encode %{
15206     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
15207             as_FloatRegister($src1$$reg),
15208             as_FloatRegister($src2$$reg));
15209   %}
15210   ins_pipe(pipe_class_default);
15211 %}
15212 
15213 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
15214 %{
15215   predicate(n->as_Vector()->length() == 2);
15216   match(Set dst (DivVD src1 src2));
15217   ins_cost(INSN_COST);
15218   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
15219   ins_encode %{
15220     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
15221             as_FloatRegister($src1$$reg),
15222             as_FloatRegister($src2$$reg));
15223   %}
15224   ins_pipe(pipe_class_default);
15225 %}
15226 
15227 // --------------------------------- SQRT -------------------------------------
15228 
15229 instruct vsqrt2D(vecX dst, vecX src)
15230 %{
15231   predicate(n->as_Vector()->length() == 2);
15232   match(Set dst (SqrtVD src));
15233   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
15234   ins_encode %{
15235     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
15236              as_FloatRegister($src$$reg));
15237   %}
15238   ins_pipe(pipe_class_default);
15239 %}
15240 
15241 // --------------------------------- ABS --------------------------------------
15242 
15243 instruct vabs2F(vecD dst, vecD src)
15244 %{
15245   predicate(n->as_Vector()->length() == 2);
15246   match(Set dst (AbsVF src));
15247   ins_cost(INSN_COST * 3);
15248   format %{ "fabs  $dst,$src\t# vector (2S)" %}
15249   ins_encode %{
15250     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
15251             as_FloatRegister($src$$reg));
15252   %}
15253   ins_pipe(pipe_class_default);
15254 %}
15255 
15256 instruct vabs4F(vecX dst, vecX src)
15257 %{
15258   predicate(n->as_Vector()->length() == 4);
15259   match(Set dst (AbsVF src));
15260   ins_cost(INSN_COST * 3);
15261   format %{ "fabs  $dst,$src\t# vector (4S)" %}
15262   ins_encode %{
15263     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
15264             as_FloatRegister($src$$reg));
15265   %}
15266   ins_pipe(pipe_class_default);
15267 %}
15268 
15269 instruct vabs2D(vecX dst, vecX src)
15270 %{
15271   predicate(n->as_Vector()->length() == 2);
15272   match(Set dst (AbsVD src));
15273   ins_cost(INSN_COST * 3);
15274   format %{ "fabs  $dst,$src\t# vector (2D)" %}
15275   ins_encode %{
15276     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
15277             as_FloatRegister($src$$reg));
15278   %}
15279   ins_pipe(pipe_class_default);
15280 %}
15281 
15282 // --------------------------------- NEG --------------------------------------
15283 
15284 instruct vneg2F(vecD dst, vecD src)
15285 %{
15286   predicate(n->as_Vector()->length() == 2);
15287   match(Set dst (NegVF src));
15288   ins_cost(INSN_COST * 3);
15289   format %{ "fneg  $dst,$src\t# vector (2S)" %}
15290   ins_encode %{
15291     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
15292             as_FloatRegister($src$$reg));
15293   %}
15294   ins_pipe(pipe_class_default);
15295 %}
15296 
15297 instruct vneg4F(vecX dst, vecX src)
15298 %{
15299   predicate(n->as_Vector()->length() == 4);
15300   match(Set dst (NegVF src));
15301   ins_cost(INSN_COST * 3);
15302   format %{ "fneg  $dst,$src\t# vector (4S)" %}
15303   ins_encode %{
15304     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
15305             as_FloatRegister($src$$reg));
15306   %}
15307   ins_pipe(pipe_class_default);
15308 %}
15309 
15310 instruct vneg2D(vecX dst, vecX src)
15311 %{
15312   predicate(n->as_Vector()->length() == 2);
15313   match(Set dst (NegVD src));
15314   ins_cost(INSN_COST * 3);
15315   format %{ "fneg  $dst,$src\t# vector (2D)" %}
15316   ins_encode %{
15317     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
15318             as_FloatRegister($src$$reg));
15319   %}
15320   ins_pipe(pipe_class_default);
15321 %}
15322 
15323 // --------------------------------- AND --------------------------------------
15324 
15325 instruct vand8B(vecD dst, vecD src1, vecD src2)
15326 %{
15327   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15328             n->as_Vector()->length_in_bytes() == 8);
15329   match(Set dst (AndV src1 src2));
15330   ins_cost(INSN_COST);
15331   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
15332   ins_encode %{
15333     __ andr(as_FloatRegister($dst$$reg), __ T8B,
15334             as_FloatRegister($src1$$reg),
15335             as_FloatRegister($src2$$reg));
15336   %}
15337   ins_pipe(pipe_class_default);
15338 %}
15339 
15340 instruct vand16B(vecX dst, vecX src1, vecX src2)
15341 %{
15342   predicate(n->as_Vector()->length_in_bytes() == 16);
15343   match(Set dst (AndV src1 src2));
15344   ins_cost(INSN_COST);
15345   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
15346   ins_encode %{
15347     __ andr(as_FloatRegister($dst$$reg), __ T16B,
15348             as_FloatRegister($src1$$reg),
15349             as_FloatRegister($src2$$reg));
15350   %}
15351   ins_pipe(pipe_class_default);
15352 %}
15353 
15354 // --------------------------------- OR ---------------------------------------
15355 
15356 instruct vor8B(vecD dst, vecD src1, vecD src2)
15357 %{
15358   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15359             n->as_Vector()->length_in_bytes() == 8);
15360   match(Set dst (OrV src1 src2));
15361   ins_cost(INSN_COST);
15362   format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
15363   ins_encode %{
15364     __ orr(as_FloatRegister($dst$$reg), __ T8B,
15365             as_FloatRegister($src1$$reg),
15366             as_FloatRegister($src2$$reg));
15367   %}
15368   ins_pipe(pipe_class_default);
15369 %}
15370 
15371 instruct vor16B(vecX dst, vecX src1, vecX src2)
15372 %{
15373   predicate(n->as_Vector()->length_in_bytes() == 16);
15374   match(Set dst (OrV src1 src2));
15375   ins_cost(INSN_COST);
15376   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
15377   ins_encode %{
15378     __ orr(as_FloatRegister($dst$$reg), __ T16B,
15379             as_FloatRegister($src1$$reg),
15380             as_FloatRegister($src2$$reg));
15381   %}
15382   ins_pipe(pipe_class_default);
15383 %}
15384 
15385 // --------------------------------- XOR --------------------------------------
15386 
15387 instruct vxor8B(vecD dst, vecD src1, vecD src2)
15388 %{
15389   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15390             n->as_Vector()->length_in_bytes() == 8);
15391   match(Set dst (XorV src1 src2));
15392   ins_cost(INSN_COST);
15393   format %{ "eor  $dst,$src1,$src2\t# vector (8B)" %}
15394   ins_encode %{
15395     __ eor(as_FloatRegister($dst$$reg), __ T8B,
15396             as_FloatRegister($src1$$reg),
15397             as_FloatRegister($src2$$reg));
15398   %}
15399   ins_pipe(pipe_class_default);
15400 %}
15401 
15402 instruct vxor16B(vecX dst, vecX src1, vecX src2)
15403 %{
15404   predicate(n->as_Vector()->length_in_bytes() == 16);
15405   match(Set dst (XorV src1 src2));
15406   ins_cost(INSN_COST);
15407   format %{ "eor  $dst,$src1,$src2\t# vector (16B)" %}
15408   ins_encode %{
15409     __ eor(as_FloatRegister($dst$$reg), __ T16B,
15410             as_FloatRegister($src1$$reg),
15411             as_FloatRegister($src2$$reg));
15412   %}
15413   ins_pipe(pipe_class_default);
15414 %}
15415 
15416 // ------------------------------ Shift ---------------------------------------
15417 
15418 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
15419   match(Set dst (LShiftCntV cnt));
15420   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
15421   ins_encode %{
15422     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
15423   %}
15424   ins_pipe(pipe_class_default);
15425 %}
15426 
15427 // Right shifts on aarch64 SIMD are implemented as a left shift by a negative amount (sketched below)
15428 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
15429   match(Set dst (RShiftCntV cnt));
15430   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t# T16B" %}
15431   ins_encode %{
15432     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
15433     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
15434   %}
15435   ins_pipe(pipe_class_default);
15436 %}
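
      // As an illustration (a sketch, not actual compiler output: the
      // v/w register numbers here are hypothetical, the register
      // allocator picks the real ones), a variable arithmetic right
      // shift of an 8H vector is emitted as
      //
      //   dup  v0.16b, w0            // vshiftcntR: broadcast the count,
      //   neg  v0.16b, v0.16b        //             then negate it
      //   sshl v1.8h, v2.8h, v0.8h   // vsll8S: a left shift by a
      //                              //   negative count is a right shift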
15437 
15438 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
15439   predicate(n->as_Vector()->length() == 4 ||
15440             n->as_Vector()->length() == 8);
15441   match(Set dst (LShiftVB src shift));
15442   match(Set dst (RShiftVB src shift));
15443   ins_cost(INSN_COST);
15444   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
15445   ins_encode %{
15446     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
15447             as_FloatRegister($src$$reg),
15448             as_FloatRegister($shift$$reg));
15449   %}
15450   ins_pipe(pipe_class_default);
15451 %}
15452 
15453 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
15454   predicate(n->as_Vector()->length() == 16);
15455   match(Set dst (LShiftVB src shift));
15456   match(Set dst (RShiftVB src shift));
15457   ins_cost(INSN_COST);
15458   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
15459   ins_encode %{
15460     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
15461             as_FloatRegister($src$$reg),
15462             as_FloatRegister($shift$$reg));
15463   %}
15464   ins_pipe(pipe_class_default);
15465 %}
15466 
15467 instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
15468   predicate(n->as_Vector()->length() == 4 ||
15469             n->as_Vector()->length() == 8);
15470   match(Set dst (URShiftVB src shift));
15471   ins_cost(INSN_COST);
15472   format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
15473   ins_encode %{
15474     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
15475             as_FloatRegister($src$$reg),
15476             as_FloatRegister($shift$$reg));
15477   %}
15478   ins_pipe(pipe_class_default);
15479 %}
15480 
15481 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
15482   predicate(n->as_Vector()->length() == 16);
15483   match(Set dst (URShiftVB src shift));
15484   ins_cost(INSN_COST);
15485   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
15486   ins_encode %{
15487     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
15488             as_FloatRegister($src$$reg),
15489             as_FloatRegister($shift$$reg));
15490   %}
15491   ins_pipe(pipe_class_default);
15492 %}
15493 
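      // Immediate shifts. The constant count is masked to the int shift
      // range (& 31) and may still exceed the lane width. A left or
      // logical right shift by the lane width or more produces zero, so
      // those cases are emitted as eor dst,src,src (i.e. dst = 0); an
      // arithmetic right shift saturates at lane_width - 1, which fills
      // every lane with its sign bit. The "-sh & 7" / "-sh & 15"
      // adjustments below assume the assembler expects the right-shift
      // immediate in encoded form (the architecture encodes a right
      // shift by sh as 2 * esize - sh in immh:immb).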
15494 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
15495   predicate(n->as_Vector()->length() == 4 ||
15496             n->as_Vector()->length() == 8);
15497   match(Set dst (LShiftVB src shift));
15498   ins_cost(INSN_COST);
15499   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
15500   ins_encode %{
15501     int sh = (int)$shift$$constant & 31;
15502     if (sh >= 8) {
15503       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15504              as_FloatRegister($src$$reg),
15505              as_FloatRegister($src$$reg));
15506     } else {
15507       __ shl(as_FloatRegister($dst$$reg), __ T8B,
15508              as_FloatRegister($src$$reg), sh);
15509     }
15510   %}
15511   ins_pipe(pipe_class_default);
15512 %}
15513 
15514 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
15515   predicate(n->as_Vector()->length() == 16);
15516   match(Set dst (LShiftVB src shift));
15517   ins_cost(INSN_COST);
15518   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
15519   ins_encode %{
15520     int sh = (int)$shift$$constant & 31;
15521     if (sh >= 8) {
15522       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15523              as_FloatRegister($src$$reg),
15524              as_FloatRegister($src$$reg));
15525     } else {
15526       __ shl(as_FloatRegister($dst$$reg), __ T16B,
15527              as_FloatRegister($src$$reg), sh);
15528     }
15529   %}
15530   ins_pipe(pipe_class_default);
15531 %}
15532 
15533 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
15534   predicate(n->as_Vector()->length() == 4 ||
15535             n->as_Vector()->length() == 8);
15536   match(Set dst (RShiftVB src shift));
15537   ins_cost(INSN_COST);
15538   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
15539   ins_encode %{
15540     int sh = (int)$shift$$constant & 31;
15541     if (sh >= 8) sh = 7;
15542     sh = -sh & 7;
15543     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
15544             as_FloatRegister($src$$reg), sh);
15545   %}
15546   ins_pipe(pipe_class_default);
15547 %}
15548 
15549 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
15550   predicate(n->as_Vector()->length() == 16);
15551   match(Set dst (RShiftVB src shift));
15552   ins_cost(INSN_COST);
15553   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
15554   ins_encode %{
15555     int sh = (int)$shift$$constant & 31;
15556     if (sh >= 8) sh = 7;
15557     sh = -sh & 7;
15558     __ sshr(as_FloatRegister($dst$$reg), __ T16B,
15559             as_FloatRegister($src$$reg), sh);
15560   %}
15561   ins_pipe(pipe_class_default);
15562 %}
15563 
15564 instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
15565   predicate(n->as_Vector()->length() == 4 ||
15566             n->as_Vector()->length() == 8);
15567   match(Set dst (URShiftVB src shift));
15568   ins_cost(INSN_COST);
15569   format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
15570   ins_encode %{
15571     int sh = (int)$shift$$constant & 31;
15572     if (sh >= 8) {
15573       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15574              as_FloatRegister($src$$reg),
15575              as_FloatRegister($src$$reg));
15576     } else {
15577       __ ushr(as_FloatRegister($dst$$reg), __ T8B,
15578               as_FloatRegister($src$$reg), -sh & 7);
15579     }
15580   %}
15581   ins_pipe(pipe_class_default);
15582 %}
15583 
15584 instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
15585   predicate(n->as_Vector()->length() == 16);
15586   match(Set dst (URShiftVB src shift));
15587   ins_cost(INSN_COST);
15588   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
15589   ins_encode %{
15590     int sh = (int)$shift$$constant & 31;
15591     if (sh >= 8) {
15592       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15593              as_FloatRegister($src$$reg),
15594              as_FloatRegister($src$$reg));
15595     } else {
15596       __ ushr(as_FloatRegister($dst$$reg), __ T16B,
15597               as_FloatRegister($src$$reg), -sh & 7);
15598     }
15599   %}
15600   ins_pipe(pipe_class_default);
15601 %}
15602 
15603 instruct vsll4S(vecD dst, vecD src, vecX shift) %{
15604   predicate(n->as_Vector()->length() == 2 ||
15605             n->as_Vector()->length() == 4);
15606   match(Set dst (LShiftVS src shift));
15607   match(Set dst (RShiftVS src shift));
15608   ins_cost(INSN_COST);
15609   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
15610   ins_encode %{
15611     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
15612             as_FloatRegister($src$$reg),
15613             as_FloatRegister($shift$$reg));
15614   %}
15615   ins_pipe(pipe_class_default);
15616 %}
15617 
15618 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
15619   predicate(n->as_Vector()->length() == 8);
15620   match(Set dst (LShiftVS src shift));
15621   match(Set dst (RShiftVS src shift));
15622   ins_cost(INSN_COST);
15623   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
15624   ins_encode %{
15625     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
15626             as_FloatRegister($src$$reg),
15627             as_FloatRegister($shift$$reg));
15628   %}
15629   ins_pipe(pipe_class_default);
15630 %}
15631 
15632 instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
15633   predicate(n->as_Vector()->length() == 2 ||
15634             n->as_Vector()->length() == 4);
15635   match(Set dst (URShiftVS src shift));
15636   ins_cost(INSN_COST);
15637   format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
15638   ins_encode %{
15639     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
15640             as_FloatRegister($src$$reg),
15641             as_FloatRegister($shift$$reg));
15642   %}
15643   ins_pipe(pipe_class_default);
15644 %}
15645 
15646 instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
15647   predicate(n->as_Vector()->length() == 8);
15648   match(Set dst (URShiftVS src shift));
15649   ins_cost(INSN_COST);
15650   format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
15651   ins_encode %{
15652     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
15653             as_FloatRegister($src$$reg),
15654             as_FloatRegister($shift$$reg));
15655   %}
15656   ins_pipe(pipe_class_default);
15657 %}
15658 
15659 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
15660   predicate(n->as_Vector()->length() == 2 ||
15661             n->as_Vector()->length() == 4);
15662   match(Set dst (LShiftVS src shift));
15663   ins_cost(INSN_COST);
15664   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
15665   ins_encode %{
15666     int sh = (int)$shift$$constant & 31;
15667     if (sh >= 16) {
15668       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15669              as_FloatRegister($src$$reg),
15670              as_FloatRegister($src$$reg));
15671     } else {
15672       __ shl(as_FloatRegister($dst$$reg), __ T4H,
15673              as_FloatRegister($src$$reg), sh);
15674     }
15675   %}
15676   ins_pipe(pipe_class_default);
15677 %}
15678 
15679 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
15680   predicate(n->as_Vector()->length() == 8);
15681   match(Set dst (LShiftVS src shift));
15682   ins_cost(INSN_COST);
15683   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
15684   ins_encode %{
15685     int sh = (int)$shift$$constant & 31;
15686     if (sh >= 16) {
15687       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15688              as_FloatRegister($src$$reg),
15689              as_FloatRegister($src$$reg));
15690     } else {
15691       __ shl(as_FloatRegister($dst$$reg), __ T8H,
15692              as_FloatRegister($src$$reg), sh);
15693     }
15694   %}
15695   ins_pipe(pipe_class_default);
15696 %}
15697 
15698 instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
15699   predicate(n->as_Vector()->length() == 2 ||
15700             n->as_Vector()->length() == 4);
15701   match(Set dst (RShiftVS src shift));
15702   ins_cost(INSN_COST);
15703   format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
15704   ins_encode %{
15705     int sh = (int)$shift$$constant & 31;
15706     if (sh >= 16) sh = 15;
15707     sh = -sh & 15;
15708     __ sshr(as_FloatRegister($dst$$reg), __ T4H,
15709             as_FloatRegister($src$$reg), sh);
15710   %}
15711   ins_pipe(pipe_class_default);
15712 %}
15713 
15714 instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
15715   predicate(n->as_Vector()->length() == 8);
15716   match(Set dst (RShiftVS src shift));
15717   ins_cost(INSN_COST);
15718   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
15719   ins_encode %{
15720     int sh = (int)$shift$$constant & 31;
15721     if (sh >= 16) sh = 15;
15722     sh = -sh & 15;
15723     __ sshr(as_FloatRegister($dst$$reg), __ T8H,
15724             as_FloatRegister($src$$reg), sh);
15725   %}
15726   ins_pipe(pipe_class_default);
15727 %}
15728 
15729 instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
15730   predicate(n->as_Vector()->length() == 2 ||
15731             n->as_Vector()->length() == 4);
15732   match(Set dst (URShiftVS src shift));
15733   ins_cost(INSN_COST);
15734   format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
15735   ins_encode %{
15736     int sh = (int)$shift$$constant & 31;
15737     if (sh >= 16) {
15738       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15739              as_FloatRegister($src$$reg),
15740              as_FloatRegister($src$$reg));
15741     } else {
15742       __ ushr(as_FloatRegister($dst$$reg), __ T4H,
15743               as_FloatRegister($src$$reg), -sh & 15);
15744     }
15745   %}
15746   ins_pipe(pipe_class_default);
15747 %}
15748 
15749 instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
15750   predicate(n->as_Vector()->length() == 8);
15751   match(Set dst (URShiftVS src shift));
15752   ins_cost(INSN_COST);
15753   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
15754   ins_encode %{
15755     int sh = (int)$shift$$constant & 31;
15756     if (sh >= 16) {
15757       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15758              as_FloatRegister($src$$reg),
15759              as_FloatRegister($src$$reg));
15760     } else {
15761       __ ushr(as_FloatRegister($dst$$reg), __ T8H,
15762               as_FloatRegister($src$$reg), -sh & 15);
15763     }
15764   %}
15765   ins_pipe(pipe_class_default);
15766 %}
15767 
15768 instruct vsll2I(vecD dst, vecD src, vecX shift) %{
15769   predicate(n->as_Vector()->length() == 2);
15770   match(Set dst (LShiftVI src shift));
15771   match(Set dst (RShiftVI src shift));
15772   ins_cost(INSN_COST);
15773   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
15774   ins_encode %{
15775     __ sshl(as_FloatRegister($dst$$reg), __ T2S,
15776             as_FloatRegister($src$$reg),
15777             as_FloatRegister($shift$$reg));
15778   %}
15779   ins_pipe(pipe_class_default);
15780 %}
15781 
15782 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
15783   predicate(n->as_Vector()->length() == 4);
15784   match(Set dst (LShiftVI src shift));
15785   match(Set dst (RShiftVI src shift));
15786   ins_cost(INSN_COST);
15787   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
15788   ins_encode %{
15789     __ sshl(as_FloatRegister($dst$$reg), __ T4S,
15790             as_FloatRegister($src$$reg),
15791             as_FloatRegister($shift$$reg));
15792   %}
15793   ins_pipe(pipe_class_default);
15794 %}
15795 
15796 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
15797   predicate(n->as_Vector()->length() == 2);
15798   match(Set dst (URShiftVI src shift));
15799   ins_cost(INSN_COST);
15800   format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
15801   ins_encode %{
15802     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
15803             as_FloatRegister($src$$reg),
15804             as_FloatRegister($shift$$reg));
15805   %}
15806   ins_pipe(pipe_class_default);
15807 %}
15808 
15809 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
15810   predicate(n->as_Vector()->length() == 4);
15811   match(Set dst (URShiftVI src shift));
15812   ins_cost(INSN_COST);
15813   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
15814   ins_encode %{
15815     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
15816             as_FloatRegister($src$$reg),
15817             as_FloatRegister($shift$$reg));
15818   %}
15819   ins_pipe(pipe_class_default);
15820 %}
15821 
15822 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
15823   predicate(n->as_Vector()->length() == 2);
15824   match(Set dst (LShiftVI src shift));
15825   ins_cost(INSN_COST);
15826   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
15827   ins_encode %{
15828     __ shl(as_FloatRegister($dst$$reg), __ T2S,
15829            as_FloatRegister($src$$reg),
15830            (int)$shift$$constant & 31);
15831   %}
15832   ins_pipe(pipe_class_default);
15833 %}
15834 
15835 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
15836   predicate(n->as_Vector()->length() == 4);
15837   match(Set dst (LShiftVI src shift));
15838   ins_cost(INSN_COST);
15839   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
15840   ins_encode %{
15841     __ shl(as_FloatRegister($dst$$reg), __ T4S,
15842            as_FloatRegister($src$$reg),
15843            (int)$shift$$constant & 31);
15844   %}
15845   ins_pipe(pipe_class_default);
15846 %}
15847 
15848 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
15849   predicate(n->as_Vector()->length() == 2);
15850   match(Set dst (RShiftVI src shift));
15851   ins_cost(INSN_COST);
15852   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
15853   ins_encode %{
15854     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
15855             as_FloatRegister($src$$reg),
15856             -(int)$shift$$constant & 31);
15857   %}
15858   ins_pipe(pipe_class_default);
15859 %}
15860 
15861 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
15862   predicate(n->as_Vector()->length() == 4);
15863   match(Set dst (RShiftVI src shift));
15864   ins_cost(INSN_COST);
15865   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
15866   ins_encode %{
15867     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
15868             as_FloatRegister($src$$reg),
15869             -(int)$shift$$constant & 31);
15870   %}
15871   ins_pipe(pipe_class_default);
15872 %}
15873 
15874 instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
15875   predicate(n->as_Vector()->length() == 2);
15876   match(Set dst (URShiftVI src shift));
15877   ins_cost(INSN_COST);
15878   format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
15879   ins_encode %{
15880     __ ushr(as_FloatRegister($dst$$reg), __ T2S,
15881             as_FloatRegister($src$$reg),
15882             -(int)$shift$$constant & 31);
15883   %}
15884   ins_pipe(pipe_class_default);
15885 %}
15886 
15887 instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
15888   predicate(n->as_Vector()->length() == 4);
15889   match(Set dst (URShiftVI src shift));
15890   ins_cost(INSN_COST);
15891   format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
15892   ins_encode %{
15893     __ ushr(as_FloatRegister($dst$$reg), __ T4S,
15894             as_FloatRegister($src$$reg),
15895             -(int)$shift$$constant & 31);
15896   %}
15897   ins_pipe(pipe_class_default);
15898 %}
15899 
15900 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
15901   predicate(n->as_Vector()->length() == 2);
15902   match(Set dst (LShiftVL src shift));
15903   match(Set dst (RShiftVL src shift));
15904   ins_cost(INSN_COST);
15905   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
15906   ins_encode %{
15907     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
15908             as_FloatRegister($src$$reg),
15909             as_FloatRegister($shift$$reg));
15910   %}
15911   ins_pipe(pipe_class_default);
15912 %}
15913 
15914 instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
15915   predicate(n->as_Vector()->length() == 2);
15916   match(Set dst (URShiftVL src shift));
15917   ins_cost(INSN_COST);
15918   format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
15919   ins_encode %{
15920     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
15921             as_FloatRegister($src$$reg),
15922             as_FloatRegister($shift$$reg));
15923   %}
15924   ins_pipe(pipe_class_default);
15925 %}
15926 
15927 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
15928   predicate(n->as_Vector()->length() == 2);
15929   match(Set dst (LShiftVL src shift));
15930   ins_cost(INSN_COST);
15931   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
15932   ins_encode %{
15933     __ shl(as_FloatRegister($dst$$reg), __ T2D,
15934            as_FloatRegister($src$$reg),
15935            (int)$shift$$constant & 63);
15936   %}
15937   ins_pipe(pipe_class_default);
15938 %}
15939 
15940 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
15941   predicate(n->as_Vector()->length() == 2);
15942   match(Set dst (RShiftVL src shift));
15943   ins_cost(INSN_COST);
15944   format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
15945   ins_encode %{
15946     __ sshr(as_FloatRegister($dst$$reg), __ T2D,
15947             as_FloatRegister($src$$reg),
15948             -(int)$shift$$constant & 63);
15949   %}
15950   ins_pipe(pipe_class_default);
15951 %}
15952 
15953 instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
15954   predicate(n->as_Vector()->length() == 2);
15955   match(Set dst (URShiftVL src shift));
15956   ins_cost(INSN_COST);
15957   format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
15958   ins_encode %{
15959     __ ushr(as_FloatRegister($dst$$reg), __ T2D,
15960             as_FloatRegister($src$$reg),
15961             -(int)$shift$$constant & 63);
15962   %}
15963   ins_pipe(pipe_class_default);
15964 %}
15965 
15966 //----------PEEPHOLE RULES-----------------------------------------------------
15967 // These must follow all instruction definitions as they use the names
15968 // defined in the instruction definitions.
15969 //
15970 // peepmatch ( root_instr_name [preceding_instruction]* );
15971 //
15972 // peepconstraint %{
15973 // (instruction_number.operand_name relational_op instruction_number.operand_name
15974 //  [, ...] );
15975 // // instruction numbers are zero-based, in left-to-right order within peepmatch
15976 //
15977 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15978 // // provide an instruction_number.operand_name for each operand that appears
15979 // // in the replacement instruction's match rule
15980 //
15981 // ---------VM FLAGS---------------------------------------------------------
15982 //
15983 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15984 //
15985 // Each peephole rule is given an identifying number starting with zero and
15986 // increasing by one in the order seen by the parser.  An individual peephole
15987 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15988 // on the command-line.
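      //
      // For example (an illustration only: OptoPeephole and OptoPeepholeAt
      // are develop flags, so a debug build is assumed):
      //
      //   java -XX:-OptoPeephole ...       # turn off all peephole rules
      //   java -XX:OptoPeepholeAt=2 ...    # apply only peephole rule 2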
15989 //
15990 // ---------CURRENT LIMITATIONS----------------------------------------------
15991 //
15992 // Only match adjacent instructions in the same basic block
15993 // Only equality constraints
15994 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15995 // Only one replacement instruction
15996 //
15997 // ---------EXAMPLE----------------------------------------------------------
15998 //
15999 // // pertinent parts of existing instructions in architecture description
16000 // instruct movI(iRegINoSp dst, iRegI src)
16001 // %{
16002 //   match(Set dst (CopyI src));
16003 // %}
16004 //
16005 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16006 // %{
16007 //   match(Set dst (AddI dst src));
16008 //   effect(KILL cr);
16009 // %}
16010 //
16011 // // Change (inc mov) to lea
16012 // peephole %{
16013 //   // increment preceded by register-register move
16014 //   peepmatch ( incI_iReg movI );
16015 //   // require that the destination register of the increment
16016 //   // match the destination register of the move
16017 //   peepconstraint ( 0.dst == 1.dst );
16018 //   // construct a replacement instruction that sets
16019 //   // the destination to ( move's source register + one )
16020 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16021 // %}
16022 //
16023 
16024 // The implementation no longer uses movX instructions, since the
16025 // machine-independent system no longer uses CopyX nodes.
16026 //
16027 // peephole
16028 // %{
16029 //   peepmatch (incI_iReg movI);
16030 //   peepconstraint (0.dst == 1.dst);
16031 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16032 // %}
16033 
16034 // peephole
16035 // %{
16036 //   peepmatch (decI_iReg movI);
16037 //   peepconstraint (0.dst == 1.dst);
16038 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16039 // %}
16040 
16041 // peephole
16042 // %{
16043 //   peepmatch (addI_iReg_imm movI);
16044 //   peepconstraint (0.dst == 1.dst);
16045 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16046 // %}
16047 
16048 // peephole
16049 // %{
16050 //   peepmatch (incL_iReg movL);
16051 //   peepconstraint (0.dst == 1.dst);
16052 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16053 // %}
16054 
16055 // peephole
16056 // %{
16057 //   peepmatch (decL_iReg movL);
16058 //   peepconstraint (0.dst == 1.dst);
16059 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16060 // %}
16061 
16062 // peephole
16063 // %{
16064 //   peepmatch (addL_iReg_imm movL);
16065 //   peepconstraint (0.dst == 1.dst);
16066 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16067 // %}
16068 
16069 // peephole
16070 // %{
16071 //   peepmatch (addP_iReg_imm movP);
16072 //   peepconstraint (0.dst == 1.dst);
16073 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16074 // %}
16075 
16076 // // Change load of spilled value to only a spill
16077 // instruct storeI(memory mem, iRegI src)
16078 // %{
16079 //   match(Set mem (StoreI mem src));
16080 // %}
16081 //
16082 // instruct loadI(iRegINoSp dst, memory mem)
16083 // %{
16084 //   match(Set dst (LoadI mem));
16085 // %}
16086 //
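      // A hypothetical completion of this fragment (a sketch only: if the
      // value is reloaded into the same register that was spilled, the
      // load is redundant and the pair collapses to just the spill):
      //
      // peephole
      // %{
      //   peepmatch (loadI storeI);
      //   peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
      //   peepreplace (storeI(1.mem 1.mem 1.src));
      // %}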
16087 
16088 //----------SMARTSPILL RULES---------------------------------------------------
16089 // These must follow all instruction definitions as they use the names
16090 // defined in the instruction definitions.
16091 
16092 // Local Variables:
16093 // mode: c++
16094 // End: