1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Integer registers.  Each 64-bit register is described to the
// allocator as two 32-bit halves: the real low half (Rn) and a
// virtual high half (Rn_H); only the low-half name is supplied in
// memory operands.  r8 and r9 are deliberately not defined here so
// that they stay invisible to the allocator and remain usable as
// scratch registers (see the note above).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: caller-save for Java use (first column, SOC) but
// callee-save under the C calling convention (second column, SOE) --
// see the save-type key above.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have fixed roles (see trailing comments) and are No-Save
// for Java use.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee-save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // SIMD/FP registers.  Each of the 32 128-bit registers vN is
  // described to the allocator as four 32-bit slots: VN (low word)
  // plus the virtual slots VN_H, VN_J and VN_K for the successive
  // words (obtained via ->next(k)).  All are save-on-call for Java
  // use (see note above).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 NZCV condition flags are not directly accessible as an
// instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Flag register.  It is not directly addressable as an instruction
// operand (see note above), so no concrete machine register backs
// it: the VMReg slot is VMRegImpl::Bad().
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order, highest priority first (see the
// note above): temporaries, then Java argument registers, then the
// C-callee-saved registers, with the fixed-role registers (heapbase,
// thread, fp, lr, sp) listed last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

// Floating-point register allocation order: the no-save registers
// v16-v31 first, then the argument registers v0-v7, then the
// ABI-callee-saved v8-v15 last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// The flag register gets a chunk of its own.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
    // R31 (sp) deliberately omitted -- see the comment above
);

// Singleton classes pin a value to one specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    // unlike any_reg32 above, this class includes the stack pointer
    R31, R31_H
);
 517 
 518 // Class for all non-special integer registers
// Variant used when R29 is reserved as the frame pointer.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// As above but with R29 (fp) available for allocation.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// NOTE(review): selects between the two classes above based on
// PreserveFramePointer; R29 should only be allocatable when the frame
// pointer is not being preserved -- confirm the operand order against
// the adlc reg_class_dynamic definition.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
// 64-bit (register pair) analogue of no_special_reg32_no_fp above.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// As above but with R29 (fp) available for allocation.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// NOTE(review): same PreserveFramePointer-based selection as
// no_special_reg32 above -- confirm operand order against the adlc
// reg_class_dynamic definition.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
// Fixed singleton 64-bit classes (low word plus virtual high half),
// used to tie a value to one specific register.
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (rmethod = r12, see comments above)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
 726 // Class for all pointer registers
// All registers, including sp and the fixed-role registers.
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers
// (excludes the fixed-role registers commented out below).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
 794 // Class for all float registers
reg_class float_reg(
    // all 32 fp registers are available for single-float allocation;
    // only the low 32-bit slot of each is named here
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (two 32-bit slots each)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers
//
// Same slot layout as double_reg: two 32-bit allocator slots
// (Vn + Vn_H) per register, i.e. the low 64 bits of each vector.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// A full 128-bit vector needs four 32-bit allocator slots per
// register: Vn (low word), Vn_H, Vn_J and Vn_K (high word).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// n.b. only the low two slots (V0, V0_H) are listed, unlike
// vectorx_reg which also names the _J/_K slots -- presumably the
// allocator only needs to track the low half here; TODO confirm
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// n.b. as with v0_reg, only the low two slots (V1, V1_H) are tracked
// even though the comment says 128 bit -- TODO confirm intent
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// n.b. as with v0_reg, only the low two slots (V2, V2_H) are tracked
// even though the comment says 128 bit -- TODO confirm intent
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// n.b. as with v0_reg, only the low two slots (V3, V3_H) are tracked
// even though the comment says 128 bit -- TODO confirm intent
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the flags register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references cost an order of magnitude more than a plain
  // insn -- effectively discouraging reordering around them.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 does not emit call trampolines, so both queries report 0.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
class HandlerImpl {

 public:

  // emit code for the exception and deoptimization handlers into
  // cbuf (implementations live outside this header section)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): presumably 1 word for the adr plus up to 3 for
    // the far branch = 4 words -- confirm against far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers used by the predicates below to walk the
  // membar/projection structure of the ideal graph (definitions are
  // in the source block further down)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // navigation between the membars of a recognised volatile put/CAS
  // subgraph; each returns NULL when the expected shape is absent
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1066 %}
1067 
1068 source %{
1069 
  // Optimization of volatile gets and puts
1071   // -------------------------------------
1072   //
1073   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1074   // use to implement volatile reads and writes. For a volatile read
1075   // we simply need
1076   //
1077   //   ldar<x>
1078   //
1079   // and for a volatile write we need
1080   //
1081   //   stlr<x>
1082   // 
1083   // Alternatively, we can implement them by pairing a normal
1084   // load/store with a memory barrier. For a volatile read we need
1085   // 
1086   //   ldr<x>
1087   //   dmb ishld
1088   //
1089   // for a volatile write
1090   //
1091   //   dmb ish
1092   //   str<x>
1093   //   dmb ish
1094   //
1095   // We can also use ldaxr and stlxr to implement compare and swap CAS
1096   // sequences. These are normally translated to an instruction
1097   // sequence like the following
1098   //
1099   //   dmb      ish
1100   // retry:
1101   //   ldxr<x>   rval raddr
1102   //   cmp       rval rold
1103   //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
1105   //   cbnz      rval retry
1106   // done:
1107   //   cset      r0, eq
1108   //   dmb ishld
1109   //
1110   // Note that the exclusive store is already using an stlxr
1111   // instruction. That is required to ensure visibility to other
1112   // threads of the exclusive write (assuming it succeeds) before that
1113   // of any subsequent writes.
1114   //
1115   // The following instruction sequence is an improvement on the above
1116   //
1117   // retry:
1118   //   ldaxr<x>  rval raddr
1119   //   cmp       rval rold
1120   //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
1122   //   cbnz      rval retry
1123   // done:
1124   //   cset      r0, eq
1125   //
1126   // We don't need the leading dmb ish since the stlxr guarantees
1127   // visibility of prior writes in the case that the swap is
1128   // successful. Crucially we don't have to worry about the case where
1129   // the swap is not successful since no valid program should be
1130   // relying on visibility of prior changes by the attempting thread
1131   // in the case where the CAS fails.
1132   //
1133   // Similarly, we don't need the trailing dmb ishld if we substitute
1134   // an ldaxr instruction since that will provide all the guarantees we
1135   // require regarding observation of changes made by other threads
1136   // before any change to the CAS address observed by the load.
1137   //
1138   // In order to generate the desired instruction sequence we need to
1139   // be able to identify specific 'signature' ideal graph node
1140   // sequences which i) occur as a translation of a volatile reads or
1141   // writes or CAS operations and ii) do not occur through any other
1142   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1144   // sequences to the desired machine code sequences. Selection of the
1145   // alternative rules can be implemented by predicates which identify
1146   // the relevant node sequences.
1147   //
1148   // The ideal graph generator translates a volatile read to the node
1149   // sequence
1150   //
1151   //   LoadX[mo_acquire]
1152   //   MemBarAcquire
1153   //
1154   // As a special case when using the compressed oops optimization we
1155   // may also see this variant
1156   //
1157   //   LoadN[mo_acquire]
1158   //   DecodeN
1159   //   MemBarAcquire
1160   //
1161   // A volatile write is translated to the node sequence
1162   //
1163   //   MemBarRelease
1164   //   StoreX[mo_release] {CardMark}-optional
1165   //   MemBarVolatile
1166   //
1167   // n.b. the above node patterns are generated with a strict
1168   // 'signature' configuration of input and output dependencies (see
1169   // the predicates below for exact details). The card mark may be as
1170   // simple as a few extra nodes or, in a few GC configurations, may
1171   // include more complex control flow between the leading and
1172   // trailing memory barriers. However, whatever the card mark
1173   // configuration these signatures are unique to translated volatile
1174   // reads/stores -- they will not appear as a result of any other
1175   // bytecode translation or inlining nor as a consequence of
1176   // optimizing transforms.
1177   //
1178   // We also want to catch inlined unsafe volatile gets and puts and
1179   // be able to implement them using either ldar<x>/stlr<x> or some
1180   // combination of ldr<x>/stlr<x> and dmb instructions.
1181   //
1182   // Inlined unsafe volatiles puts manifest as a minor variant of the
1183   // normal volatile put node sequence containing an extra cpuorder
1184   // membar
1185   //
1186   //   MemBarRelease
1187   //   MemBarCPUOrder
1188   //   StoreX[mo_release] {CardMark}-optional
1189   //   MemBarVolatile
1190   //
1191   // n.b. as an aside, the cpuorder membar is not itself subject to
1192   // matching and translation by adlc rules.  However, the rule
1193   // predicates need to detect its presence in order to correctly
1194   // select the desired adlc rules.
1195   //
1196   // Inlined unsafe volatile gets manifest as a somewhat different
1197   // node sequence to a normal volatile get
1198   //
1199   //   MemBarCPUOrder
1200   //        ||       \\
1201   //   MemBarAcquire LoadX[mo_acquire]
1202   //        ||
1203   //   MemBarCPUOrder
1204   //
1205   // In this case the acquire membar does not directly depend on the
1206   // load. However, we can be sure that the load is generated from an
1207   // inlined unsafe volatile get if we see it dependent on this unique
1208   // sequence of membar nodes. Similarly, given an acquire membar we
1209   // can know that it was added because of an inlined unsafe volatile
1210   // get if it is fed and feeds a cpuorder membar and if its feed
1211   // membar also feeds an acquiring load.
1212   //
1213   // Finally an inlined (Unsafe) CAS operation is translated to the
1214   // following ideal graph
1215   //
1216   //   MemBarRelease
1217   //   MemBarCPUOrder
1218   //   CompareAndSwapX {CardMark}-optional
1219   //   MemBarCPUOrder
1220   //   MemBarAcquire
1221   //
1222   // So, where we can identify these volatile read and write
1223   // signatures we can choose to plant either of the above two code
1224   // sequences. For a volatile read we can simply plant a normal
1225   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1226   // also choose to inhibit translation of the MemBarAcquire and
1227   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1228   //
1229   // When we recognise a volatile store signature we can choose to
1230   // plant at a dmb ish as a translation for the MemBarRelease, a
1231   // normal str<x> and then a dmb ish for the MemBarVolatile.
1232   // Alternatively, we can inhibit translation of the MemBarRelease
1233   // and MemBarVolatile and instead plant a simple stlr<x>
1234   // instruction.
1235   //
1236   // when we recognise a CAS signature we can choose to plant a dmb
1237   // ish as a translation for the MemBarRelease, the conventional
1238   // macro-instruction sequence for the CompareAndSwap node (which
1239   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1240   // Alternatively, we can elide generation of the dmb instructions
1241   // and plant the alternative CompareAndSwap macro-instruction
1242   // sequence (which uses ldaxr<x>).
1243   // 
1244   // Of course, the above only applies when we see these signature
1245   // configurations. We still want to plant dmb instructions in any
1246   // other cases where we may see a MemBarAcquire, MemBarRelease or
1247   // MemBarVolatile. For example, at the end of a constructor which
1248   // writes final/volatile fields we will see a MemBarRelease
1249   // instruction and this needs a 'dmb ish' lest we risk the
1250   // constructed object being visible without making the
1251   // final/volatile field writes visible.
1252   //
1253   // n.b. the translation rules below which rely on detection of the
1254   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1255   // If we see anything other than the signature configurations we
1256   // always just translate the loads and stores to ldr<x> and str<x>
1257   // and translate acquire, release and volatile membars to the
1258   // relevant dmb instructions.
1259   //
1260 
1261   // graph traversal helpers used for volatile put/get and CAS
1262   // optimization
1263 
1264   // 1) general purpose helpers
1265 
1266   // if node n is linked to a parent MemBarNode by an intervening
1267   // Control and Memory ProjNode return the MemBarNode otherwise return
1268   // NULL.
1269   //
1270   // n may only be a Load or a MemBar.
1271 
1272   MemBarNode *parent_membar(const Node *n)
1273   {
1274     Node *ctl = NULL;
1275     Node *mem = NULL;
1276     Node *membar = NULL;
1277 
1278     if (n->is_Load()) {
1279       ctl = n->lookup(LoadNode::Control);
1280       mem = n->lookup(LoadNode::Memory);
1281     } else if (n->is_MemBar()) {
1282       ctl = n->lookup(TypeFunc::Control);
1283       mem = n->lookup(TypeFunc::Memory);
1284     } else {
1285         return NULL;
1286     }
1287 
1288     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
1289       return NULL;
1290 
1291     membar = ctl->lookup(0);
1292 
1293     if (!membar || !membar->is_MemBar())
1294       return NULL;
1295 
1296     if (mem->lookup(0) != membar)
1297       return NULL;
1298 
1299     return membar->as_MemBar();
1300   }
1301 
1302   // if n is linked to a child MemBarNode by intervening Control and
1303   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1304 
1305   MemBarNode *child_membar(const MemBarNode *n)
1306   {
1307     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1308     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1309 
1310     // MemBar needs to have both a Ctl and Mem projection
1311     if (! ctl || ! mem)
1312       return NULL;
1313 
1314     MemBarNode *child = NULL;
1315     Node *x;
1316 
1317     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1318       x = ctl->fast_out(i);
1319       // if we see a membar we keep hold of it. we may also see a new
1320       // arena copy of the original but it will appear later
1321       if (x->is_MemBar()) {
1322           child = x->as_MemBar();
1323           break;
1324       }
1325     }
1326 
1327     if (child == NULL)
1328       return NULL;
1329 
1330     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1331       x = mem->fast_out(i);
1332       // if we see a membar we keep hold of it. we may also see a new
1333       // arena copy of the original but it will appear later
1334       if (x == child) {
1335         return child;
1336       }
1337     }
1338     return NULL;
1339   }
1340 
1341   // helper predicate use to filter candidates for a leading memory
1342   // barrier
1343   //
1344   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1345   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1346 
1347   bool leading_membar(const MemBarNode *barrier)
1348   {
1349     int opcode = barrier->Opcode();
1350     // if this is a release membar we are ok
1351     if (opcode == Op_MemBarRelease)
1352       return true;
1353     // if its a cpuorder membar . . .
1354     if (opcode != Op_MemBarCPUOrder)
1355       return false;
1356     // then the parent has to be a release membar
1357     MemBarNode *parent = parent_membar(barrier);
1358     if (!parent)
1359       return false;
1360     opcode = parent->Opcode();
1361     return opcode == Op_MemBarRelease;
1362   }
1363  
1364   // 2) card mark detection helper
1365 
1366   // helper predicate which can be used to detect a volatile membar
1367   // introduced as part of a conditional card mark sequence either by
1368   // G1 or by CMS when UseCondCardMark is true.
1369   //
1370   // membar can be definitively determined to be part of a card mark
1371   // sequence if and only if all the following hold
1372   //
1373   // i) it is a MemBarVolatile
1374   //
1375   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1376   // true
1377   //
1378   // iii) the node's Mem projection feeds a StoreCM node.
1379   
1380   bool is_card_mark_membar(const MemBarNode *barrier)
1381   {
1382     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark))
1383       return false;
1384 
1385     if (barrier->Opcode() != Op_MemBarVolatile)
1386       return false;
1387 
1388     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1389 
1390     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1391       Node *y = mem->fast_out(i);
1392       if (y->Opcode() == Op_StoreCM) {
1393         return true;
1394       }
1395     }
1396   
1397     return false;
1398   }
1399 
1400 
1401   // 3) helper predicates to traverse volatile put or CAS graphs which
1402   // may contain GC barrier subgraphs
1403 
1404   // Preamble
1405   // --------
1406   //
1407   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1409   // leading MemBarRelease and a trailing MemBarVolatile as follows
1410   //
1411   //   MemBarRelease
1412   //  {      ||      } -- optional
1413   //  {MemBarCPUOrder}
1414   //         ||     \\
1415   //         ||     StoreX[mo_release]
1416   //         | \     /
1417   //         | MergeMem
1418   //         | /
1419   //   MemBarVolatile
1420   //
1421   // where
1422   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1423   //  | \ and / indicate further routing of the Ctl and Mem feeds
1424   // 
1425   // this is the graph we see for non-object stores. however, for a
1426   // volatile Object store (StoreN/P) we may see other nodes below the
1427   // leading membar because of the need for a GC pre- or post-write
1428   // barrier.
1429   //
  // with most GC configurations we will see this simple variant which
1431   // includes a post-write barrier card mark.
1432   //
1433   //   MemBarRelease______________________________
1434   //         ||    \\               Ctl \        \\
1435   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1436   //         | \     /                       . . .  /
1437   //         | MergeMem
1438   //         | /
1439   //         ||      /
1440   //   MemBarVolatile
1441   //
1442   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1443   // the object address to an int used to compute the card offset) and
1444   // Ctl+Mem to a StoreB node (which does the actual card mark).
1445   //
1446   // n.b. a StoreCM node will only appear in this configuration when
1447   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1448   // because it implies a requirement to order visibility of the card
1449   // mark (StoreCM) relative to the object put (StoreP/N) using a
1450   // StoreStore memory barrier (arguably this ought to be represented
1451   // explicitly in the ideal graph but that is not how it works). This
1452   // ordering is required for both non-volatile and volatile
1453   // puts. Normally that means we need to translate a StoreCM using
1454   // the sequence
1455   //
1456   //   dmb ishst
1457   //   stlrb
1458   //
1459   // However, in the case of a volatile put if we can recognise this
1460   // configuration and plant an stlr for the object write then we can
1461   // omit the dmb and just plant an strb since visibility of the stlr
1462   // is ordered before visibility of subsequent stores. StoreCM nodes
1463   // also arise when using G1 or using CMS with conditional card
1464   // marking. In these cases (as we shall see) we don't need to insert
1465   // the dmb when translating StoreCM because there is already an
1466   // intervening StoreLoad barrier between it and the StoreP/N.
1467   //
1468   // It is also possible to perform the card mark conditionally on it
1469   // currently being unmarked in which case the volatile put graph
1470   // will look slightly different
1471   //
1472   //   MemBarRelease____________________________________________
1473   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1474   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1475   //         | \     /                              \            |
1476   //         | MergeMem                            . . .      StoreB
1477   //         | /                                                /
1478   //         ||     /
1479   //   MemBarVolatile
1480   //
1481   // It is worth noting at this stage that both the above
1482   // configurations can be uniquely identified by checking that the
1483   // memory flow includes the following subgraph:
1484   //
1485   //   MemBarRelease
1486   //  {MemBarCPUOrder}
1487   //          |  \      . . .
1488   //          |  StoreX[mo_release]  . . .
1489   //          |   /
1490   //         MergeMem
1491   //          |
1492   //   MemBarVolatile
1493   //
1494   // This is referred to as a *normal* subgraph. It can easily be
1495   // detected starting from any candidate MemBarRelease,
1496   // StoreX[mo_release] or MemBarVolatile.
1497   //
1498   // A simple variation on this normal case occurs for an unsafe CAS
1499   // operation. The basic graph for a non-object CAS is
1500   //
1501   //   MemBarRelease
1502   //         ||
1503   //   MemBarCPUOrder
1504   //         ||     \\   . . .
1505   //         ||     CompareAndSwapX
1506   //         ||       |
1507   //         ||     SCMemProj
1508   //         | \     /
1509   //         | MergeMem
1510   //         | /
1511   //   MemBarCPUOrder
1512   //         ||
1513   //   MemBarAcquire
1514   //
1515   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1518   // tail of the graph is a pair comprising a MemBarCPUOrder +
1519   // MemBarAcquire.
1520   //
1521   // So, in the case of a CAS the normal graph has the variant form
1522   //
1523   //   MemBarRelease
1524   //   MemBarCPUOrder
1525   //          |   \      . . .
1526   //          |  CompareAndSwapX  . . .
1527   //          |    |
1528   //          |   SCMemProj
1529   //          |   /  . . .
1530   //         MergeMem
1531   //          |
1532   //   MemBarCPUOrder
1533   //   MemBarAcquire
1534   //
1535   // This graph can also easily be detected starting from any
1536   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1537   //
1538   // the code below uses two helper predicates, leading_to_normal and
1539   // normal_to_leading to identify these normal graphs, one validating
1540   // the layout starting from the top membar and searching down and
1541   // the other validating the layout starting from the lower membar
1542   // and searching up.
1543   //
1544   // There are two special case GC configurations when a normal graph
1545   // may not be generated: when using G1 (which always employs a
1546   // conditional card mark); and when using CMS with conditional card
1547   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
  // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1551   // object put and the corresponding conditional card mark. CMS
1552   // employs a post-write GC barrier while G1 employs both a pre- and
1553   // post-write GC barrier. Of course the extra nodes may be absent --
1554   // they are only inserted for object puts. This significantly
1555   // complicates the task of identifying whether a MemBarRelease,
1556   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1557   // when using these GC configurations (see below). It adds similar
1558   // complexity to the task of identifying whether a MemBarRelease,
1559   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1560   //
1561   // In both cases the post-write subtree includes an auxiliary
1562   // MemBarVolatile (StoreLoad barrier) separating the object put and
1563   // the read of the corresponding card. This poses two additional
1564   // problems.
1565   //
1566   // Firstly, a card mark MemBarVolatile needs to be distinguished
1567   // from a normal trailing MemBarVolatile. Resolving this first
1568   // problem is straightforward: a card mark MemBarVolatile always
1569   // projects a Mem feed to a StoreCM node and that is a unique marker
1570   //
1571   //      MemBarVolatile (card mark)
1572   //       C |    \     . . .
1573   //         |   StoreCM   . . .
1574   //       . . .
1575   //
1576   // The second problem is how the code generator is to translate the
1577   // card mark barrier? It always needs to be translated to a "dmb
1578   // ish" instruction whether or not it occurs as part of a volatile
1579   // put. A StoreLoad barrier is needed after the object put to ensure
1580   // i) visibility to GC threads of the object put and ii) visibility
1581   // to the mutator thread of any card clearing write by a GC
1582   // thread. Clearly a normal store (str) will not guarantee this
1583   // ordering but neither will a releasing store (stlr). The latter
1584   // guarantees that the object put is visible but does not guarantee
1585   // that writes by other threads have also been observed.
1586   // 
1587   // So, returning to the task of translating the object put and the
1588   // leading/trailing membar nodes: what do the non-normal node graph
1589   // look like for these 2 special cases? and how can we determine the
1590   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1591   // in both normal and non-normal cases?
1592   //
1593   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1595   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1596   // intervening StoreLoad barrier (MemBarVolatile).
1597   //
1598   // So, with CMS we may see a node graph for a volatile object store
1599   // which looks like this
1600   //
1601   //   MemBarRelease
1602   //   MemBarCPUOrder_(leading)__________________
1603   //     C |    M \       \\                   C \
1604   //       |       \    StoreN/P[mo_release]  CastP2X
1605   //       |    Bot \    /
1606   //       |       MergeMem
1607   //       |         /
1608   //      MemBarVolatile (card mark)
1609   //     C |  ||    M |
1610   //       | LoadB    |
1611   //       |   |      |
1612   //       | Cmp      |\
1613   //       | /        | \
1614   //       If         |  \
1615   //       | \        |   \
1616   // IfFalse  IfTrue  |    \
1617   //       \     / \  |     \
1618   //        \   / StoreCM    |
1619   //         \ /      |      |
1620   //        Region   . . .   |
1621   //          | \           /
1622   //          |  . . .  \  / Bot
1623   //          |       MergeMem
1624   //          |          |
1625   //        MemBarVolatile (trailing)
1626   //
1627   // The first MergeMem merges the AliasIdxBot Mem slice from the
1628   // leading membar and the oopptr Mem slice from the Store into the
1629   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1630   // Mem slice from the card mark membar and the AliasIdxRaw slice
1631   // from the StoreCM into the trailing membar (n.b. the latter
1632   // proceeds via a Phi associated with the If region).
1633   //
1634   // The graph for a CAS varies slightly, the obvious difference being
1635   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1636   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1637   // MemBarAcquire pair. The other important difference is that the
1638   // CompareAndSwap node's SCMemProj is not merged into the card mark
1639   // membar - it still feeds the trailing MergeMem. This also means
1640   // that the card mark membar receives its Mem feed directly from the
1641   // leading membar rather than via a MergeMem.
1642   //
1643   //   MemBarRelease
1644   //   MemBarCPUOrder__(leading)_________________________
1645   //       ||                       \\                 C \
1646   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1647   //     C |  ||    M |              |
1648   //       | LoadB    |       ______/|
1649   //       |   |      |      /       |
1650   //       | Cmp      |     /      SCMemProj
1651   //       | /        |    /         |
1652   //       If         |   /         /
1653   //       | \        |  /         /
1654   // IfFalse  IfTrue  | /         /
1655   //       \     / \  |/ prec    /
1656   //        \   / StoreCM       /
1657   //         \ /      |        /
1658   //        Region   . . .    /
1659   //          | \            /
1660   //          |  . . .  \   / Bot
1661   //          |       MergeMem
1662   //          |          |
1663   //        MemBarCPUOrder
1664   //        MemBarAcquire (trailing)
1665   //
1666   // This has a slightly different memory subgraph to the one seen
1667   // previously but the core of it is the same as for the CAS normal
  // subgraph
1669   //
1670   //   MemBarRelease
1671   //   MemBarCPUOrder____
1672   //      ||             \      . . .
1673   //   MemBarVolatile  CompareAndSwapX  . . .
1674   //      |  \            |
1675   //        . . .   SCMemProj
1676   //          |     /  . . .
1677   //         MergeMem
1678   //          |
1679   //   MemBarCPUOrder
1680   //   MemBarAcquire
1681   //
1682   //
1683   // G1 is quite a lot more complicated. The nodes inserted on behalf
1684   // of G1 may comprise: a pre-write graph which adds the old value to
1685   // the SATB queue; the releasing store itself; and, finally, a
1686   // post-write graph which performs a card mark.
1687   //
1688   // The pre-write graph may be omitted, but only when the put is
1689   // writing to a newly allocated (young gen) object and then only if
1690   // there is a direct memory chain to the Initialize node for the
1691   // object allocation. This will not happen for a volatile put since
1692   // any memory chain passes through the leading membar.
1693   //
1694   // The pre-write graph includes a series of 3 If tests. The outermost
1695   // If tests whether SATB is enabled (no else case). The next If tests
1696   // whether the old value is non-NULL (no else case). The third tests
1697   // whether the SATB queue index is > 0, if so updating the queue. The
1698   // else case for this third If calls out to the runtime to allocate a
1699   // new queue buffer.
1700   //
1701   // So with G1 the pre-write and releasing store subgraph looks like
1702   // this (the nested Ifs are omitted).
1703   //
1704   //  MemBarRelease (leading)____________
1705   //     C |  ||  M \   M \    M \  M \ . . .
1706   //       | LoadB   \  LoadL  LoadN   \
1707   //       | /        \                 \
1708   //       If         |\                 \
1709   //       | \        | \                 \
1710   //  IfFalse  IfTrue |  \                 \
1711   //       |     |    |   \                 |
1712   //       |     If   |   /\                |
1713   //       |     |          \               |
1714   //       |                 \              |
1715   //       |    . . .         \             |
1716   //       | /       | /       |            |
1717   //      Region  Phi[M]       |            |
1718   //       | \       |         |            |
1719   //       |  \_____ | ___     |            |
1720   //     C | C \     |   C \ M |            |
1721   //       | CastP2X | StoreN/P[mo_release] |
1722   //       |         |         |            |
1723   //     C |       M |       M |          M |
1724   //        \        |         |           /
1725   //                  . . . 
1726   //          (post write subtree elided)
1727   //                    . . .
1728   //             C \         M /
1729   //         MemBarVolatile (trailing)
1730   //
1731   // n.b. the LoadB in this subgraph is not the card read -- it's a
1732   // read of the SATB queue active flag.
1733   //
1734   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1736   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1737   //
1738   // The G1 post-write subtree is also optional, this time when the
1739   // new value being written is either null or can be identified as a
1740   // newly allocated (young gen) object with no intervening control
1741   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1744   // trailing membar as per the normal subgraph. So, the only special
1745   // case which arises is when the post-write subgraph is generated.
1746   //
1747   // The kernel of the post-write G1 subgraph is the card mark itself
1748   // which includes a card mark memory barrier (MemBarVolatile), a
1749   // card test (LoadB), and a conditional update (If feeding a
1750   // StoreCM). These nodes are surrounded by a series of nested Ifs
1751   // which try to avoid doing the card mark. The top level If skips if
1752   // the object reference does not cross regions (i.e. it tests if
1753   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1754   // need not be recorded. The next If, which skips on a NULL value,
1755   // may be absent (it is not generated if the type of value is >=
1756   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1757   // checking if card_val != young).  n.b. although this test requires
1758   // a pre-read of the card it can safely be done before the StoreLoad
1759   // barrier. However that does not bypass the need to reread the card
1760   // after the barrier.
1761   //
1762   //                (pre-write subtree elided)
1763   //        . . .                  . . .    . . .  . . .
1764   //        C |                    M |     M |    M |
1765   //       Region                  Phi[M] StoreN    |
1766   //          |                     / \      |      |
1767   //         / \_______            /   \     |      |
1768   //      C / C \      . . .            \    |      |
1769   //       If   CastP2X . . .            |   |      |
1770   //       / \                           |   |      |
1771   //      /   \                          |   |      |
1772   // IfFalse IfTrue                      |   |      |
1773   //   |       |                         |   |     /|
1774   //   |       If                        |   |    / |
1775   //   |      / \                        |   |   /  |
1776   //   |     /   \                        \  |  /   |
1777   //   | IfFalse IfTrue                   MergeMem  |
1778   //   |  . . .    / \                       /      |
1779   //   |          /   \                     /       |
1780   //   |     IfFalse IfTrue                /        |
1781   //   |      . . .    |                  /         |
1782   //   |               If                /          |
1783   //   |               / \              /           |
1784   //   |              /   \            /            |
1785   //   |         IfFalse IfTrue       /             |
1786   //   |           . . .   |         /              |
1787   //   |                    \       /               |
1788   //   |                     \     /                |
1789   //   |             MemBarVolatile__(card mark)    |
1790   //   |                ||   C |  M \  M \          |
1791   //   |               LoadB   If    |    |         |
1792   //   |                      / \    |    |         |
1793   //   |                     . . .   |    |         |
1794   //   |                          \  |    |        /
1795   //   |                        StoreCM   |       /
1796   //   |                          . . .   |      /
1797   //   |                        _________/      /
1798   //   |                       /  _____________/
1799   //   |   . . .       . . .  |  /            /
1800   //   |    |                 | /   _________/
1801   //   |    |               Phi[M] /        /
1802   //   |    |                 |   /        /
1803   //   |    |                 |  /        /
1804   //   |  Region  . . .     Phi[M]  _____/
1805   //   |    /                 |    /
1806   //   |                      |   /   
1807   //   | . . .   . . .        |  /
1808   //   | /                    | /
1809   // Region           |  |  Phi[M]
1810   //   |              |  |  / Bot
1811   //    \            MergeMem 
1812   //     \            /
1813   //     MemBarVolatile
1814   //
1815   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1816   // from the leading membar and the oopptr Mem slice from the Store
1817   // into the card mark membar i.e. the memory flow to the card mark
1818   // membar still looks like a normal graph.
1819   //
1820   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1821   // Mem slices (from the StoreCM and other card mark queue stores).
1822   // However in this case the AliasIdxBot Mem slice does not come
1823   // direct from the card mark membar. It is merged through a series
1824   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1825   // from the leading membar with the Mem feed from the card mark
1826   // membar. Each Phi corresponds to one of the Ifs which may skip
1827   // around the card mark membar. So when the If implementing the NULL
1828   // value check has been elided the total number of Phis is 2
1829   // otherwise it is 3.
1830   //
1831   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1835   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1836   // Mem feed from the CompareAndSwapP/N includes a precedence
1837   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1838   // trailing membar. So, as before the configuration includes the
1839   // normal CAS graph as a subgraph of the memory flow.
1840   //
1841   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1843   // its child membar, either a volatile put graph (including a
1844   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1845   // When that child is not a card mark membar then it marks the end
1846   // of the volatile put or CAS subgraph. If the child is a card mark
1847   // membar then the normal subgraph will form part of a volatile put
1848   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1849   // to a trailing barrier via a MergeMem. That feed is either direct
1850   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1851   // memory flow (for G1).
1852   // 
1853   // The predicates controlling generation of instructions for store
1854   // and barrier nodes employ a few simple helper functions (described
1855   // below) which identify the presence or absence of all these
1856   // subgraph configurations and provide a means of traversing from
1857   // one node in the subgraph to another.
1858 
1859   // is_CAS(int opcode)
1860   //
1861   // return true if opcode is one of the possible CompareAndSwapX
1862   // values otherwise false.
1863 
1864   bool is_CAS(int opcode)
1865   {
1866     return (opcode == Op_CompareAndSwapI ||
1867             opcode == Op_CompareAndSwapL ||
1868             opcode == Op_CompareAndSwapN ||
1869             opcode == Op_CompareAndSwapP);
1870   }
1871 
1872   // leading_to_normal
1873   //
  // graph traversal helper which detects the normal case Mem feed from
1875   // a release membar (or, optionally, its cpuorder child) to a
1876   // dependent volatile membar i.e. it ensures that one or other of
1877   // the following Mem flow subgraph is present.
1878   //
1879   //   MemBarRelease
1880   //   MemBarCPUOrder {leading}
1881   //          |  \      . . .
1882   //          |  StoreN/P[mo_release]  . . .
1883   //          |   /
1884   //         MergeMem
1885   //          |
1886   //   MemBarVolatile {trailing or card mark}
1887   //
1888   //   MemBarRelease
1889   //   MemBarCPUOrder {leading}
1890   //      |       \      . . .
1891   //      |     CompareAndSwapX  . . .
1892   //               |
1893   //     . . .    SCMemProj
1894   //           \   |
1895   //      |    MergeMem
1896   //      |       /
1897   //    MemBarCPUOrder
1898   //    MemBarAcquire {trailing}
1899   //
1900   // if the correct configuration is present returns the trailing
1901   // membar otherwise NULL.
1902   //
1903   // the input membar is expected to be either a cpuorder membar or a
1904   // release membar. in the latter case it should not have a cpu membar
1905   // child.
1906   //
1907   // the returned value may be a card mark or trailing membar
1908   //
1909 
  MemBarNode *leading_to_normal(MemBarNode *leading)
  {
    // the input must be one of the two membar types which can start a
    // leading membar pair
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem)
      return NULL;

    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;

    // scan the users of the membar's Mem projection looking for at
    // most one MergeMem plus either a releasing store or a CAS node
    // (StoreCM nodes are deliberately excluded from the store case)
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL)
          return NULL;
        // two merge mems is one too many
        mm = x->as_MergeMem();
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL)
          return NULL;
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL)
          return NULL;
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas)
      return NULL;

    // must have a merge if we also have st
    // (in the CAS case the merge is located below via the SCMemProj)
    if (st && !mm)
      return NULL;

    Node *y = NULL;
    if (cas) {
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL)
        return NULL;
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL)
        return NULL;
    } else {
      // ensure the store feeds the existing mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL)
        return NULL;
    }

    MemBarNode *mbar = NULL;
    // ensure the merge feeds to the expected type of membar: a
    // MemBarVolatile for the store case, or a MemBarCPUOrder whose
    // child is a MemBarAcquire for the CAS case.  only the first
    // membar user found is considered (note the unconditional break).
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        int opcode = x->Opcode();
        if (opcode == Op_MemBarVolatile && st) {
          mbar = x->as_MemBar();
        } else if (cas && opcode == Op_MemBarCPUOrder) {
          // n.b. this local y shadows the outer Node *y above
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
            mbar = y;
          }
        }
        break;
      }
    }

    return mbar;
  }
2009 
2010   // normal_to_leading
2011   //
2012   // graph traversal helper which detects the normal case Mem feed
2013   // from either a card mark or a trailing membar to a preceding
2014   // release membar (optionally its cpuorder child) i.e. it ensures
2015   // that one or other of the following Mem flow subgraphs is present.
2016   //
2017   //   MemBarRelease
2018   //   MemBarCPUOrder {leading}
2019   //          |  \      . . .
2020   //          |  StoreN/P[mo_release]  . . .
2021   //          |   /
2022   //         MergeMem
2023   //          |
2024   //   MemBarVolatile {card mark or trailing}
2025   //
2026   //   MemBarRelease
2027   //   MemBarCPUOrder {leading}
2028   //      |       \      . . .
2029   //      |     CompareAndSwapX  . . .
2030   //               |
2031   //     . . .    SCMemProj
2032   //           \   |
2033   //      |    MergeMem
2034   //      |        /
2035   //    MemBarCPUOrder
2036   //    MemBarAcquire {trailing}
2037   //
2038   // this predicate checks for the same flow as the previous predicate
2039   // but starting from the bottom rather than the top.
2040   //
  // if the configuration is present returns the cpuorder membar for
2042   // preference or when absent the release membar otherwise NULL.
2043   //
2044   // n.b. the input membar is expected to be a MemBarVolatile but
2045   // need not be a card mark membar.
2046 
2047   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2048   {
2049     // input must be a volatile membar
2050     assert((barrier->Opcode() == Op_MemBarVolatile ||
2051             barrier->Opcode() == Op_MemBarAcquire),
2052            "expecting a volatile or an acquire membar");
2053     Node *x;
2054     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2055 
2056     // if we have an acquire membar then it must be fed via a CPUOrder
2057     // membar
2058 
2059     if (is_cas) {
2060       // skip to parent barrier which must be a cpuorder
2061       x = parent_membar(barrier);
2062       if (x->Opcode() != Op_MemBarCPUOrder)
2063         return NULL;
2064     } else {
2065       // start from the supplied barrier
2066       x = (Node *)barrier;
2067     }
2068 
2069     // the Mem feed to the membar should be a merge
2070     x = x ->in(TypeFunc::Memory);
2071     if (!x->is_MergeMem())
2072       return NULL;
2073 
2074     MergeMemNode *mm = x->as_MergeMem();
2075 
2076     if (is_cas) {
2077       // the merge should be fed from the CAS via an SCMemProj node
2078       x = NULL;
2079       for (uint idx = 1; idx < mm->req(); idx++) {
2080         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2081           x = mm->in(idx);
2082           break;
2083         }
2084       }
2085       if (x == NULL)
2086         return NULL;
2087       // check for a CAS feeding this proj
2088       x = x->in(0);
2089       int opcode = x->Opcode();
2090       if (!is_CAS(opcode))
2091         return NULL;
2092       // the CAS should get its mem feed from the leading membar
2093       x = x->in(MemNode::Memory);
2094     } else {
2095       // the merge should get its Bottom mem feed from the leading membar
2096       x = mm->in(Compile::AliasIdxBot);      
2097     } 
2098 
2099     // ensure this is a non control projection
2100     if (!x->is_Proj() || x->is_CFG())
2101       return NULL;
2102     // if it is fed by a membar that's the one we want
2103     x = x->in(0);
2104 
2105     if (!x->is_MemBar())
2106       return NULL;
2107 
2108     MemBarNode *leading = x->as_MemBar();
2109     // reject invalid candidates
2110     if (!leading_membar(leading))
2111       return NULL;
2112 
2113     // ok, we have a leading membar, now for the sanity clauses
2114 
2115     // the leading membar must feed Mem to a releasing store or CAS
2116     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2117     StoreNode *st = NULL;
2118     LoadStoreNode *cas = NULL;
2119     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2120       x = mem->fast_out(i);
2121       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2122         // two stores or CASes is one too many
2123         if (st != NULL || cas != NULL)
2124           return NULL;
2125         st = x->as_Store();
2126       } else if (is_CAS(x->Opcode())) {
2127         if (st != NULL || cas != NULL)
2128           return NULL;
2129         cas = x->as_LoadStore();
2130       }
2131     }
2132 
2133     // we should not have both a store and a cas
2134     if (st == NULL & cas == NULL)
2135       return NULL;
2136 
2137     if (st == NULL) {
2138       // nothing more to check
2139       return leading;
2140     } else {
2141       // we should not have a store if we started from an acquire
2142       if (is_cas)
2143         return NULL;
2144 
2145       // the store should feed the merge we used to get here
2146       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2147         if (st->fast_out(i) == mm)
2148           return leading;
2149       }
2150     }
2151 
2152     return NULL;
2153   }
2154 
2155   // card_mark_to_trailing
2156   //
2157   // graph traversal helper which detects extra, non-normal Mem feed
2158   // from a card mark volatile membar to a trailing membar i.e. it
2159   // ensures that one of the following three GC post-write Mem flow
2160   // subgraphs is present.
2161   //
2162   // 1)
2163   //     . . .
2164   //       |
2165   //   MemBarVolatile (card mark)
2166   //      |          |     
2167   //      |        StoreCM
2168   //      |          |
2169   //      |        . . .
2170   //  Bot |  / 
2171   //   MergeMem 
2172   //      |
2173   //      |
2174   //    MemBarVolatile {trailing}
2175   //
2176   // 2)
2177   //   MemBarRelease/CPUOrder (leading)
2178   //    |
2179   //    | 
2180   //    |\       . . .
2181   //    | \        | 
2182   //    |  \  MemBarVolatile (card mark) 
2183   //    |   \   |     |
2184   //     \   \  |   StoreCM    . . .
2185   //      \   \ |
2186   //       \  Phi
2187   //        \ /
2188   //        Phi  . . .
2189   //     Bot |   /
2190   //       MergeMem
2191   //         |
2192   //    MemBarVolatile {trailing}
2193   //
2194   //
2195   // 3)
2196   //   MemBarRelease/CPUOrder (leading)
2197   //    |
2198   //    |\
2199   //    | \
2200   //    |  \      . . .
2201   //    |   \       |
2202   //    |\   \  MemBarVolatile (card mark)
2203   //    | \   \   |     |
2204   //    |  \   \  |   StoreCM    . . .
2205   //    |   \   \ |
2206   //     \   \  Phi
2207   //      \   \ /  
2208   //       \  Phi
2209   //        \ /
2210   //        Phi  . . .
2211   //     Bot |   /
2212   //       MergeMem
2213   //         |
2214   //         |
2215   //    MemBarVolatile {trailing}
2216   //
2217   // configuration 1 is only valid if UseConcMarkSweepGC &&
2218   // UseCondCardMark
2219   //
2220   // configurations 2 and 3 are only valid if UseG1GC.
2221   //
2222   // if a valid configuration is present returns the trailing membar
2223   // otherwise NULL.
2224   //
2225   // n.b. the supplied membar is expected to be a card mark
2226   // MemBarVolatile i.e. the caller must ensure the input node has the
2227   // correct operand and feeds Mem to a StoreCM node
2228 
2229   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2230   {
2231     // input must be a card mark volatile membar
2232     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2233 
2234     Node *feed = barrier->proj_out(TypeFunc::Memory);
2235     Node *x;
2236     MergeMemNode *mm = NULL;
2237 
2238     const int MAX_PHIS = 3;     // max phis we will search through
2239     int phicount = 0;           // current search count
2240 
2241     bool retry_feed = true;
2242     while (retry_feed) {
2243       // see if we have a direct MergeMem feed
2244       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2245         x = feed->fast_out(i);
2246         // the correct Phi will be merging a Bot memory slice
2247         if (x->is_MergeMem()) {
2248           mm = x->as_MergeMem();
2249           break;
2250         }
2251       }
2252       if (mm) {
2253         retry_feed = false;
2254       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2255         // the barrier may feed indirectly via one or two Phi nodes
2256         PhiNode *phi = NULL;
2257         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2258           x = feed->fast_out(i);
2259           // the correct Phi will be merging a Bot memory slice
2260           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2261             phi = x->as_Phi();
2262             break;
2263           }
2264         }
2265         if (!phi)
2266           return NULL;
2267         // look for another merge below this phi
2268         feed = phi;
2269       } else {
2270         // couldn't find a merge
2271         return NULL;
2272       }
2273     }
2274 
2275     // sanity check this feed turns up as the expected slice
2276     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2277 
2278     MemBarNode *trailing = NULL;
2279     // be sure we have a trailing membar the merge
2280     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2281       x = mm->fast_out(i);
2282       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2283         trailing = x->as_MemBar();
2284         break;
2285       }
2286     }
2287 
2288     return trailing;
2289   }
2290 
2291   // trailing_to_card_mark
2292   //
2293   // graph traversal helper which detects extra, non-normal Mem feed
2294   // from a trailing volatile membar to a preceding card mark volatile
2295   // membar i.e. it identifies whether one of the three possible extra
2296   // GC post-write Mem flow subgraphs is present
2297   //
2298   // this predicate checks for the same flow as the previous predicate
2299   // but starting from the bottom rather than the top.
2300   //
2301   // if the configuration is present returns the card mark membar
2302   // otherwise NULL
2303   //
2304   // n.b. the supplied membar is expected to be a trailing
2305   // MemBarVolatile i.e. the caller must ensure the input node has the
2306   // correct opcode
2307 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // follow the merge's AliasIdxBot slice back towards the card mark
    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if x is already a Proj we can skip the Phi walk entirely
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      // only G1 inserts the intermediate Phis, and we bound the walk
      // at MAX_PHIS to avoid chasing an unexpected configuration
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // classify this Phi's inputs: we hope to see either a Proj fed
        // by the card mark MemBarVolatile, or another Phi together
        // with a feed from the leading membar
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar())
      return NULL;

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar))
      return NULL;

    return card_mark_membar;
  }
2381 
2382   // trailing_to_leading
2383   //
2384   // graph traversal helper which checks the Mem flow up the graph
2385   // from a (non-card mark) trailing membar attempting to locate and
2386   // return an associated leading membar. it first looks for a
2387   // subgraph in the normal configuration (relying on helper
2388   // normal_to_leading). failing that it then looks for one of the
2389   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2391   // trailing_to_card_mark), and then checks that the card mark membar
2392   // is fed by a leading membar (once again relying on auxiliary
2393   // predicate normal_to_leading).
2394   //
  // if the configuration is valid returns the cpuorder membar for
2396   // preference or when absent the release membar otherwise NULL.
2397   //
2398   // n.b. the input membar is expected to be either a volatile or
2399   // acquire membar but in the former case must *not* be a card mark
2400   // membar.
2401 
2402   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2403   {
2404     assert((trailing->Opcode() == Op_MemBarAcquire ||
2405             trailing->Opcode() == Op_MemBarVolatile),
2406            "expecting an acquire or volatile membar");
2407     assert((trailing->Opcode() != Op_MemBarVolatile ||
2408             !is_card_mark_membar(trailing)),
2409            "not expecting a card mark membar");
2410 
2411     MemBarNode *leading = normal_to_leading(trailing);
2412 
2413     if (leading)
2414       return leading;
2415 
2416     // nothing more to do if this is an acquire
2417     if (trailing->Opcode() == Op_MemBarAcquire)
2418       return NULL;
2419 
2420     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2421 
2422     if (!card_mark_membar)
2423       return NULL;
2424 
2425     return normal_to_leading(card_mark_membar);
2426   }
2427 
2428   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2429 
2430 bool unnecessary_acquire(const Node *barrier)
2431 {
2432   assert(barrier->is_MemBar(), "expecting a membar");
2433 
2434   if (UseBarriersForVolatile)
2435     // we need to plant a dmb
2436     return false;
2437 
2438   // a volatile read derived from bytecode (or also from an inlined
2439   // SHA field read via LibraryCallKit::load_field_from_object)
2440   // manifests as a LoadX[mo_acquire] followed by an acquire membar
2441   // with a bogus read dependency on it's preceding load. so in those
2442   // cases we will find the load node at the PARMS offset of the
2443   // acquire membar.  n.b. there may be an intervening DecodeN node.
2444   //
2445   // a volatile load derived from an inlined unsafe field access
2446   // manifests as a cpuorder membar with Ctl and Mem projections
2447   // feeding both an acquire membar and a LoadX[mo_acquire]. The
2448   // acquire then feeds another cpuorder membar via Ctl and Mem
2449   // projections. The load has no output dependency on these trailing
2450   // membars because subsequent nodes inserted into the graph take
2451   // their control feed from the final membar cpuorder meaning they
2452   // are all ordered after the load.
2453 
2454   Node *x = barrier->lookup(TypeFunc::Parms);
2455   if (x) {
2456     // we are starting from an acquire and it has a fake dependency
2457     //
2458     // need to check for
2459     //
2460     //   LoadX[mo_acquire]
2461     //   {  |1   }
2462     //   {DecodeN}
2463     //      |Parms
2464     //   MemBarAcquire*
2465     //
2466     // where * tags node we were passed
2467     // and |k means input k
2468     if (x->is_DecodeNarrowPtr())
2469       x = x->in(1);
2470 
2471     return (x->is_Load() && x->as_Load()->is_acquire());
2472   }
2473   
2474   // now check for an unsafe volatile get
2475 
2476   // need to check for
2477   //
2478   //   MemBarCPUOrder
2479   //        ||       \\
2480   //   MemBarAcquire* LoadX[mo_acquire]
2481   //        ||
2482   //   MemBarCPUOrder
2483   //
2484   // where * tags node we were passed
2485   // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes
2486 
2487   // check for a parent MemBarCPUOrder
2488   ProjNode *ctl;
2489   ProjNode *mem;
2490   MemBarNode *parent = parent_membar(barrier);
2491   if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
2492     return false;
2493   ctl = parent->proj_out(TypeFunc::Control);
2494   mem = parent->proj_out(TypeFunc::Memory);
2495   if (!ctl || !mem)
2496     return false;
2497   // ensure the proj nodes both feed a LoadX[mo_acquire]
2498   LoadNode *ld = NULL;
2499   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
2500     x = ctl->fast_out(i);
2501     // if we see a load we keep hold of it and stop searching
2502     if (x->is_Load()) {
2503       ld = x->as_Load();
2504       break;
2505     }
2506   }
2507   // it must be an acquiring load
2508   if (ld && ld->is_acquire()) {
2509 
2510     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2511       x = mem->fast_out(i);
2512       // if we see the same load we drop it and stop searching
2513       if (x == ld) {
2514         ld = NULL;
2515         break;
2516       }
2517     }
2518     // we must have dropped the load
2519     if (ld == NULL) {
2520       // check for a child cpuorder membar
2521       MemBarNode *child  = child_membar(barrier->as_MemBar());
2522       if (child && child->Opcode() != Op_MemBarCPUOrder)
2523         return true;
2524     }
2525   }
2526 
2527   // final option for unnecessary mebar is that it is a trailing node
2528   // belonging to a CAS
2529 
2530   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2531 
2532   return leading != NULL;
2533 }
2534 
2535 bool needs_acquiring_load(const Node *n)
2536 {
2537   assert(n->is_Load(), "expecting a load");
2538   if (UseBarriersForVolatile)
2539     // we use a normal load and a dmb
2540     return false;
2541 
2542   LoadNode *ld = n->as_Load();
2543 
2544   if (!ld->is_acquire())
2545     return false;
2546 
2547   // check if this load is feeding an acquire membar
2548   //
2549   //   LoadX[mo_acquire]
2550   //   {  |1   }
2551   //   {DecodeN}
2552   //      |Parms
2553   //   MemBarAcquire*
2554   //
2555   // where * tags node we were passed
2556   // and |k means input k
2557 
2558   Node *start = ld;
2559   Node *mbacq = NULL;
2560 
2561   // if we hit a DecodeNarrowPtr we reset the start node and restart
2562   // the search through the outputs
2563  restart:
2564 
2565   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2566     Node *x = start->fast_out(i);
2567     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2568       mbacq = x;
2569     } else if (!mbacq &&
2570                (x->is_DecodeNarrowPtr() ||
2571                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2572       start = x;
2573       goto restart;
2574     }
2575   }
2576 
2577   if (mbacq) {
2578     return true;
2579   }
2580 
2581   // now check for an unsafe volatile get
2582 
2583   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2584   //
2585   //     MemBarCPUOrder
2586   //        ||       \\
2587   //   MemBarAcquire* LoadX[mo_acquire]
2588   //        ||
2589   //   MemBarCPUOrder
2590 
2591   MemBarNode *membar;
2592 
2593   membar = parent_membar(ld);
2594 
2595   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
2596     return false;
2597 
2598   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2599 
2600   membar = child_membar(membar);
2601 
2602   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
2603     return false;
2604 
2605   membar = child_membar(membar);
2606   
2607   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
2608     return false;
2609 
2610   return true;
2611 }
2612 
2613 bool unnecessary_release(const Node *n)
2614 {
2615   assert((n->is_MemBar() &&
2616           n->Opcode() == Op_MemBarRelease),
2617          "expecting a release membar");
2618 
2619   if (UseBarriersForVolatile)
2620     // we need to plant a dmb
2621     return false;
2622 
2623   // if there is a dependent CPUOrder barrier then use that as the
2624   // leading
2625 
2626   MemBarNode *barrier = n->as_MemBar();
2627   // check for an intervening cpuorder membar
2628   MemBarNode *b = child_membar(barrier);
2629   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2630     // ok, so start the check from the dependent cpuorder barrier
2631     barrier = b;
2632   }
2633 
2634   // must start with a normal feed
2635   MemBarNode *child_barrier = leading_to_normal(barrier);
2636 
2637   if (!child_barrier)
2638     return false;
2639 
2640   if (!is_card_mark_membar(child_barrier))
2641     // this is the trailing membar and we are done
2642     return true;
2643 
2644   // must be sure this card mark feeds a trailing membar
2645   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2646   return (trailing != NULL);
2647 }
2648 
2649 bool unnecessary_volatile(const Node *n)
2650 {
2651   // assert n->is_MemBar();
2652   if (UseBarriersForVolatile)
2653     // we need to plant a dmb
2654     return false;
2655 
2656   MemBarNode *mbvol = n->as_MemBar();
2657 
2658   // first we check if this is part of a card mark. if so then we have
2659   // to generate a StoreLoad barrier
2660   
2661   if (is_card_mark_membar(mbvol))
2662       return false;
2663 
2664   // ok, if it's not a card mark then we still need to check if it is
2665   // a trailing membar of a volatile put hgraph.
2666 
2667   return (trailing_to_leading(mbvol) != NULL);
2668 }
2669 
2670 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2671 
2672 bool needs_releasing_store(const Node *n)
2673 {
2674   // assert n->is_Store();
2675   if (UseBarriersForVolatile)
2676     // we use a normal store and dmb combination
2677     return false;
2678 
2679   StoreNode *st = n->as_Store();
2680 
2681   // the store must be marked as releasing
2682   if (!st->is_release())
2683     return false;
2684 
2685   // the store must be fed by a membar
2686 
2687   Node *x = st->lookup(StoreNode::Memory);
2688 
2689   if (! x || !x->is_Proj())
2690     return false;
2691 
2692   ProjNode *proj = x->as_Proj();
2693 
2694   x = proj->lookup(0);
2695 
2696   if (!x || !x->is_MemBar())
2697     return false;
2698 
2699   MemBarNode *barrier = x->as_MemBar();
2700 
2701   // if the barrier is a release membar or a cpuorder mmebar fed by a
2702   // release membar then we need to check whether that forms part of a
2703   // volatile put graph.
2704 
2705   // reject invalid candidates
2706   if (!leading_membar(barrier))
2707     return false;
2708 
2709   // does this lead a normal subgraph?
2710   MemBarNode *mbvol = leading_to_normal(barrier);
2711 
2712   if (!mbvol)
2713     return false;
2714 
2715   // all done unless this is a card mark
2716   if (!is_card_mark_membar(mbvol))
2717     return true;
2718   
2719   // we found a card mark -- just make sure we have a trailing barrier
2720 
2721   return (card_mark_to_trailing(mbvol) != NULL);
2722 }
2723 
2724 // predicate controlling translation of CAS
2725 //
2726 // returns true if CAS needs to use an acquiring load otherwise false
2727 
2728 bool needs_acquiring_load_exclusive(const Node *n)
2729 {
2730   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
2731   if (UseBarriersForVolatile)
2732     return false;
2733 
2734   // CAS nodes only ought to turn up in inlined unsafe CAS operations
2735 #ifndef PRODUCT
2736 #ifdef ASSERT
2737   LoadStoreNode *st = n->as_LoadStore();
2738 
2739   // the store must be fed by a membar
2740 
2741   Node *x = st->lookup(StoreNode::Memory);
2742 
2743   assert (x && x->is_Proj(), "CAS not fed by memory proj!");
2744 
2745   ProjNode *proj = x->as_Proj();
2746 
2747   x = proj->lookup(0);
2748 
2749   assert (x && x->is_MemBar(), "CAS not fed by membar!");
2750 
2751   MemBarNode *barrier = x->as_MemBar();
2752 
2753   // the barrier must be a cpuorder mmebar fed by a release membar
2754 
2755   assert(barrier->Opcode() == Op_MemBarCPUOrder,
2756          "CAS not fed by cpuorder membar!");
2757       
2758   MemBarNode *b = parent_membar(barrier);
2759   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
2760           "CAS not fed by cpuorder+release membar pair!");
2761 
2762   // does this lead a normal subgraph?
2763   MemBarNode *mbar = leading_to_normal(barrier);
2764 
2765   assert(mbar != NULL, "CAS not embedded in normal graph!");
2766 
2767   assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
2768 #endif // ASSERT
2769 #endif // !PRODUCT
2770   // so we can just return true here
2771   return true;
2772 }
2773 
2774 // predicate controlling translation of StoreCM
2775 //
2776 // returns true if a StoreStore must precede the card write otherwise
2777 // false
2778 
2779 bool unnecessary_storestore(const Node *storecm)
2780 {
2781   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2782 
2783   // we only ever need to generate a dmb ishst between an object put
2784   // and the associated card mark when we are using CMS without
2785   // conditional card marking
2786 
2787   if (!UseConcMarkSweepGC || UseCondCardMark)
2788     return true;
2789 
2790   // if we are implementing volatile puts using barriers then the
2791   // object put as an str so we must insert the dmb ishst
2792 
2793   if (UseBarriersForVolatile)
2794     return false;
2795 
2796   // we can omit the dmb ishst if this StoreCM is part of a volatile
2797   // put because in thta case the put will be implemented by stlr
2798   //
2799   // we need to check for a normal subgraph feeding this StoreCM.
2800   // that means the StoreCM must be fed Memory from a leading membar,
2801   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2802   // leading membar must be part of a normal subgraph
2803 
2804   Node *x = storecm->in(StoreNode::Memory);
2805 
2806   if (!x->is_Proj())
2807     return false;
2808 
2809   x = x->in(0);
2810 
2811   if (!x->is_MemBar())
2812     return false;
2813 
2814   MemBarNode *leading = x->as_MemBar();
2815 
2816   // reject invalid candidates
2817   if (!leading_membar(leading))
2818     return false;
2819 
2820   // we can omit the StoreStore if it is the head of a normal subgraph
2821   return (leading_to_normal(leading) != NULL);
2822 }
2823 
2824 
2825 #define __ _masm.
2826 
2827 // advance declarations for helper functions to convert register
2828 // indices to register objects
2829 
2830 // the ad file has to provide implementations of certain methods
2831 // expected by the generic code
2832 //
2833 // REQUIRED FUNCTIONALITY
2834 
2835 //=============================================================================
2836 
2837 // !!!!! Special hack to get all types of calls to specify the byte offset
2838 //       from the start of the call to the point where the return address
2839 //       will point.
2840 
2841 int MachCallStaticJavaNode::ret_addr_offset()
2842 {
2843   // call should be a simple bl
2844   int off = 4;
2845   return off;
2846 }
2847 
2848 int MachCallDynamicJavaNode::ret_addr_offset()
2849 {
2850   return 16; // movz, movk, movk, bl
2851 }
2852 
2853 int MachCallRuntimeNode::ret_addr_offset() {
2854   // for generated stubs the call will be
2855   //   far_call(addr)
2856   // for real runtime callouts it will be six instructions
2857   // see aarch64_enc_java_to_runtime
2858   //   adr(rscratch2, retaddr)
2859   //   lea(rscratch1, RuntimeAddress(addr)
2860   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2861   //   blrt rscratch1
2862   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2863   if (cb) {
2864     return MacroAssembler::far_branch_size();
2865   } else {
2866     return 6 * NativeInstruction::instruction_size;
2867   }
2868 }
2869 
2870 // Indicate if the safepoint node needs the polling page as an input
2871 
2872 // the shared code plants the oop data at the start of the generated
2873 // code for the safepoint node and that needs ot be at the load
2874 // instruction itself. so we cannot plant a mov of the safepoint poll
2875 // address followed by a load. setting this to true means the mov is
2876 // scheduled as a prior instruction. that's better for scheduling
2877 // anyway.
2878 
2879 bool SafePointNode::needs_polling_address_input()
2880 {
2881   return true;
2882 }
2883 
2884 //=============================================================================
2885 
#ifndef PRODUCT
// debug-only textual form of the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a breakpoint as a single brk instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

// size is computed generically from the emitted code
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2900 
2901 //=============================================================================
2902 
#ifndef PRODUCT
  // debug-only textual form: shows how many pad bytes this nop covers
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as alignment padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // each nop is one fixed-size instruction
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2919 
2920 //=============================================================================
2921 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2922 
2923 int Compile::ConstantTable::calculate_table_base_offset() const {
2924   return 0;  // absolute addressing, no offset
2925 }
2926 
2927 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2928 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2929   ShouldNotReachHere();
2930 }
2931 
2932 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2933   // Empty encoding
2934 }
2935 
2936 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2937   return 0;
2938 }
2939 
2940 #ifndef PRODUCT
2941 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
2942   st->print("-- \t// MachConstantBaseNode (empty encoding)");
2943 }
2944 #endif
2945 
#ifndef PRODUCT
// debug-only textual form of the prolog; mirrors the instruction
// sequence produced by MachPrologNode::emit below
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames fit the stp immediate offset; large frames need the
  // size materialized in rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
2967 
// emit the method prolog: optional stack bang, frame build, simulator
// notification and constant table base offset setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    // not implemented for this platform
    Unimplemented();
  }

  // record the offset at which the frame is fully constructed
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3003 
// prolog size depends on frame size and flags, so it is computed by
// actually emitting the code
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// the prolog contains no relocatable values
int MachPrologNode::reloc() const
{
  return 0;
}
3014 
3015 //=============================================================================
3016 
#ifndef PRODUCT
// debug-only textual form of the epilog; mirrors the instruction
// sequence produced by MachEpilogNode::emit below
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // three cases: empty frame, frame small enough for the ldp
  // immediate offset, and large frame needing rscratch1
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // method returns also touch the safepoint polling page
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
3042 
// emit the method epilog: tear down the frame, notify the simulator
// and read the safepoint polling page on method return
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    // poll-return: load from the polling page so a safepoint can trap us
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3058 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// use the generic pipeline description for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3080 
3081 //=============================================================================
3082 
3083 // Figure out which register class each belongs in: rc_int, rc_float or
3084 // rc_stack.
3085 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3086 
3087 static enum RC rc_class(OptoReg::Name reg) {
3088 
3089   if (reg == OptoReg::Bad) {
3090     return rc_bad;
3091   }
3092 
3093   // we have 30 int registers * 2 halves
3094   // (rscratch1 and rscratch2 are omitted)
3095 
3096   if (reg < 60) {
3097     return rc_int;
3098   }
3099 
3100   // we have 32 float register * 2 halves
3101   if (reg < 60 + 128) {
3102     return rc_float;
3103   }
3104 
3105   // Between float regs & stack is the flags regs.
3106   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3107 
3108   return rc_stack;
3109 }
3110 
3111 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3112   Compile* C = ra_->C;
3113 
3114   // Get registers to move.
3115   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3116   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3117   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3118   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3119 
3120   enum RC src_hi_rc = rc_class(src_hi);
3121   enum RC src_lo_rc = rc_class(src_lo);
3122   enum RC dst_hi_rc = rc_class(dst_hi);
3123   enum RC dst_lo_rc = rc_class(dst_lo);
3124 
3125   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3126 
3127   if (src_hi != OptoReg::Bad) {
3128     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3129            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3130            "expected aligned-adjacent pairs");
3131   }
3132 
3133   if (src_lo == dst_lo && src_hi == dst_hi) {
3134     return 0;            // Self copy, no move.
3135   }
3136 
3137   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3138               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3139   int src_offset = ra_->reg2offset(src_lo);
3140   int dst_offset = ra_->reg2offset(dst_lo);
3141 
3142   if (bottom_type()->isa_vect() != NULL) {
3143     uint ireg = ideal_reg();
3144     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3145     if (cbuf) {
3146       MacroAssembler _masm(cbuf);
3147       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3148       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3149         // stack->stack
3150         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3151         if (ireg == Op_VecD) {
3152           __ unspill(rscratch1, true, src_offset);
3153           __ spill(rscratch1, true, dst_offset);
3154         } else {
3155           __ spill_copy128(src_offset, dst_offset);
3156         }
3157       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3158         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3159                ireg == Op_VecD ? __ T8B : __ T16B,
3160                as_FloatRegister(Matcher::_regEncode[src_lo]));
3161       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3162         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3163                        ireg == Op_VecD ? __ D : __ Q,
3164                        ra_->reg2offset(dst_lo));
3165       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3166         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3167                        ireg == Op_VecD ? __ D : __ Q,
3168                        ra_->reg2offset(src_lo));
3169       } else {
3170         ShouldNotReachHere();
3171       }
3172     }
3173   } else if (cbuf) {
3174     MacroAssembler _masm(cbuf);
3175     switch (src_lo_rc) {
3176     case rc_int:
3177       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3178         if (is64) {
3179             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3180                    as_Register(Matcher::_regEncode[src_lo]));
3181         } else {
3182             MacroAssembler _masm(cbuf);
3183             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3184                     as_Register(Matcher::_regEncode[src_lo]));
3185         }
3186       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3187         if (is64) {
3188             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3189                      as_Register(Matcher::_regEncode[src_lo]));
3190         } else {
3191             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3192                      as_Register(Matcher::_regEncode[src_lo]));
3193         }
3194       } else {                    // gpr --> stack spill
3195         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3196         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3197       }
3198       break;
3199     case rc_float:
3200       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3201         if (is64) {
3202             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3203                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3204         } else {
3205             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3206                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3207         }
3208       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3209           if (cbuf) {
3210             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3211                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3212         } else {
3213             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3214                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3215         }
3216       } else {                    // fpr --> stack spill
3217         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3218         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3219                  is64 ? __ D : __ S, dst_offset);
3220       }
3221       break;
3222     case rc_stack:
3223       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3224         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3225       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3226         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3227                    is64 ? __ D : __ S, src_offset);
3228       } else {                    // stack --> stack copy
3229         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3230         __ unspill(rscratch1, is64, src_offset);
3231         __ spill(rscratch1, is64, dst_offset);
3232       }
3233       break;
3234     default:
3235       assert(false, "bad rc_class for spill");
3236       ShouldNotReachHere();
3237     }
3238   }
3239 
3240   if (st) {
3241     st->print("spill ");
3242     if (src_lo_rc == rc_stack) {
3243       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3244     } else {
3245       st->print("%s -> ", Matcher::regName[src_lo]);
3246     }
3247     if (dst_lo_rc == rc_stack) {
3248       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3249     } else {
3250       st->print("%s", Matcher::regName[dst_lo]);
3251     }
3252     if (bottom_type()->isa_vect() != NULL) {
3253       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3254     } else {
3255       st->print("\t# spill size = %d", is64 ? 64:32);
3256     }
3257   }
3258 
3259   return 0;
3260 
3261 }
3262 
#ifndef PRODUCT
// debug-only textual form; without a register allocator only the node
// indices can be printed
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

// emit the spill/copy instructions via the shared worker
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// size varies with the register classes involved; compute generically
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3279 
3280 //=============================================================================
3281 
#ifndef PRODUCT
// debug-only textual form of the box-lock address computation
// NOTE(review): the format string has a stray ']' and says "rsp"
// rather than "sp" -- looks inherited from the x86 port; confirm
// before changing output
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

// emit the address of the stack-allocated lock box: reg = sp + offset
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // only offsets encodable as an add immediate are supported
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3308 
3309 //=============================================================================
3310 
#ifndef PRODUCT
// debug-only textual form of the unverified entry point (inline cache
// check).  NOTE(review): the printed addresses appear to be missing a
// leading '[' -- confirm before changing output
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

// emit the unverified entry point: compare the receiver klass against
// the inline cache and jump to the ic-miss stub on mismatch
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// size varies with compressed-class configuration; compute generically
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3346 
3347 // REQUIRED EMIT CODE
3348 
3349 //=============================================================================
3350 
3351 // Emit exception handler code.
3352 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3353 {
3354   // mov rscratch1 #exception_blob_entry_point
3355   // br rscratch1
3356   // Note that the code buffer's insts_mark is always relative to insts.
3357   // That's why we must use the macroassembler to generate a handler.
3358   MacroAssembler _masm(&cbuf);
3359   address base = __ start_a_stub(size_exception_handler());
3360   if (base == NULL) {
3361     ciEnv::current()->record_failure("CodeCache is full");
3362     return 0;  // CodeBuffer::expand failed
3363   }
3364   int offset = __ offset();
3365   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3366   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3367   __ end_a_stub();
3368   return offset;
3369 }
3370 
3371 // Emit deopt handler code.
3372 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
3373 {
3374   // Note that the code buffer's insts_mark is always relative to insts.
3375   // That's why we must use the macroassembler to generate a handler.
3376   MacroAssembler _masm(&cbuf);
3377   address base = __ start_a_stub(size_deopt_handler());
3378   if (base == NULL) {
3379     ciEnv::current()->record_failure("CodeCache is full");
3380     return 0;  // CodeBuffer::expand failed
3381   }
3382   int offset = __ offset();
3383 
3384   __ adr(lr, __ pc());
3385   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
3386 
3387   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
3388   __ end_a_stub();
3389   return offset;
3390 }
3391 
3392 // REQUIRED MATCHER CODE
3393 
3394 //=============================================================================
3395 
3396 const bool Matcher::match_rule_supported(int opcode) {
3397 
3398   // TODO
3399   // identify extra cases that we might want to provide match rules for
3400   // e.g. Op_StrEquals and other intrinsics
3401   if (!has_match_rule(opcode)) {
3402     return false;
3403   }
3404 
3405   return true;  // Per default match rules are supported.
3406 }
3407 
// not used on this platform
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}

// short branch support is not used on this platform
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
{
  Unimplemented();
  return false;
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3430 
3431 // Vector width in bytes.
3432 const int Matcher::vector_width_in_bytes(BasicType bt) {
3433   int size = MIN2(16,(int)MaxVectorSize);
3434   // Minimum 2 values in vector
3435   if (size < 2*type2aelembytes(bt)) size = 0;
3436   // But never < 4
3437   if (size < 4) size = 0;
3438   return size;
3439 }
3440 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
//  For the moment limit the vector size to 8 bytes
    int size = 8 / type2aelembytes(bt);
    if (size < 2) size = 2;  // but never fewer than 2 elements
    return size;
}
3451 
3452 // Vector ideal reg.
3453 const int Matcher::vector_ideal_reg(int len) {
3454   switch(len) {
3455     case  8: return Op_VecD;
3456     case 16: return Op_VecX;
3457   }
3458   ShouldNotReachHere();
3459   return 0;
3460 }
3461 
// Ideal register class for a vector shift count: always a full
// 128-bit vector register, regardless of the operand size.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// AArch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3475 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 18 * BytesPerLong;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
3504 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing is only safe when the decode is a no-op, i.e.
  // compressed oops use a zero shift.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3524 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Implicit-null-check fixup hook; unused on AArch64 -- aborts if ever
// reached.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3556 
3557 // Return whether or not this register is ever used as an argument.
3558 // This function is used on startup to build the trampoline stubs in
3559 // generateOptoStub.  Registers not mentioned will be killed by the VM
3560 // call in the trampoline, and arguments in those registers not be
3561 // available to the callee.
3562 bool Matcher::can_be_java_arg(int reg)
3563 {
3564   return
3565     reg ==  R0_num || reg == R0_H_num ||
3566     reg ==  R1_num || reg == R1_H_num ||
3567     reg ==  R2_num || reg == R2_H_num ||
3568     reg ==  R3_num || reg == R3_H_num ||
3569     reg ==  R4_num || reg == R4_H_num ||
3570     reg ==  R5_num || reg == R5_H_num ||
3571     reg ==  R6_num || reg == R6_H_num ||
3572     reg ==  R7_num || reg == R7_H_num ||
3573     reg ==  V0_num || reg == V0_H_num ||
3574     reg ==  V1_num || reg == V1_H_num ||
3575     reg ==  V2_num || reg == V2_H_num ||
3576     reg ==  V3_num || reg == V3_H_num ||
3577     reg ==  V4_num || reg == V4_H_num ||
3578     reg ==  V5_num || reg == V5_H_num ||
3579     reg ==  V6_num || reg == V6_H_num ||
3580     reg ==  V7_num || reg == V7_H_num;
3581 }
3582 
// Any Java argument register is also spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never use a special asm sequence for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3591 
// Register for DIVI projection of divmodI.
// divmodI/divmodL nodes are never generated on AArch64, so these four
// projection-mask queries should never be reached.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Registers in which SP is preserved around a MethodHandle invoke;
// on AArch64 the frame pointer register is used.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3618 
3619 // helper for encoding java_to_runtime calls on sim
3620 //
3621 // this is needed to compute the extra arguments required when
3622 // planting a call to the simulator blrt instruction. the TypeFunc
3623 // can be queried to identify the counts for integral, and floating
3624 // arguments and the return type
3625 
3626 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3627 {
3628   int gps = 0;
3629   int fps = 0;
3630   const TypeTuple *domain = tf->domain();
3631   int max = domain->cnt();
3632   for (int i = TypeFunc::Parms; i < max; i++) {
3633     const Type *t = domain->field_at(i);
3634     switch(t->basic_type()) {
3635     case T_FLOAT:
3636     case T_DOUBLE:
3637       fps++;
3638     default:
3639       gps++;
3640     }
3641   }
3642   gpcnt = gps;
3643   fpcnt = fps;
3644   BasicType rt = tf->return_type();
3645   switch (rt) {
3646   case T_VOID:
3647     rtype = MacroAssembler::ret_type_void;
3648     break;
3649   default:
3650     rtype = MacroAssembler::ret_type_integral;
3651     break;
3652   case T_FLOAT:
3653     rtype = MacroAssembler::ret_type_float;
3654     break;
3655   case T_DOUBLE:
3656     rtype = MacroAssembler::ret_type_double;
3657     break;
3658   }
3659 }
3660 
// Emit a volatile (acquire/release) load or store.  The AArch64
// ldar/stlr family only accepts a bare base-register address, so any
// indexed, scaled or displaced addressing mode is rejected here.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the MacroAssembler load/store
// emitters dispatched through the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3674 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // Index is a 32-bit value used as a 64-bit offset: sign-extend
      // it as part of the address computation.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: plain base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // Base + (possibly extended) scaled index.
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Both an index and a displacement: fold base + disp into
        // rscratch1 first, since a single instruction cannot encode
        // all three components.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3711 
  // Float/double variant of loadStore above; FP memory patterns only
  // ever see the four scaled I2L opcodes, so the case list is shorter.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      // 32-bit index used as a 64-bit offset: sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      // No index register: plain base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // Base + (possibly extended) scaled index.
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold base + disp into rscratch1, then index off that.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3740 
  // Vector variant of loadStore: vector memory operands support only
  // base+disp or base+scaled-index (never both at once).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3752 
3753 %}
3754 
3755 
3756 
3757 //----------ENCODING BLOCK-----------------------------------------------------
3758 // This block specifies the encoding classes used by the compiler to
3759 // output byte streams.  Encoding classes are parameterized macros
3760 // used by Machine Instruction Nodes in order to generate the bit
3761 // encoding of the instruction.  Operands specify their base encoding
3762 // interface with the interface keyword.  There are currently
3763 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3764 // COND_INTER.  REG_INTER causes an operand to generate a function
3765 // which returns its register number when queried.  CONST_INTER causes
3766 // an operand to generate a function which returns the value of the
3767 // constant when queried.  MEMORY_INTER causes an operand to generate
3768 // four functions which return the Base Register, the Index Register,
3769 // the Scale Value, and the Offset Value of the operand when queried.
3770 // COND_INTER causes an operand to generate six functions which return
3771 // the encoding code (ie - encoding bits for the instruction)
3772 // associated with each basic boolean condition for a conditional
3773 // instruction.
3774 //
3775 // Instructions specify two basic values for encoding.  Again, a
3776 // function is available to check if the constant displacement is an
3777 // oop. They use the ins_encode keyword to specify their encoding
3778 // classes (which must be a sequence of enc_class names, and their
3779 // parameters, specified in the encoding block), and they use the
3780 // opcode keyword to specify, in order, their primary, secondary, and
3781 // tertiary opcode.  Only the opcode sections which a particular
3782 // instruction needs for encoding need to be specified.
3783 encode %{
3784   // Build emit functions for each basic byte or larger field in the
3785   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3786   // from C++ code in the enc_class source block.  Emit functions will
3787   // live in the main source block for now.  In future, we can
3788   // generalize this by adding a syntax that specifies the sizes of
3789   // fields in an order, so that the adlc can build the emit functions
3790   // automagically
3791 
  // catch all for unimplemented encodings
  // Emits a trap so that any instruct accidentally left with this
  // encoding fails loudly at runtime rather than emitting nothing.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3797 
  // BEGIN Non-volatile memory access

  // Each enc_class below emits one plain (non-volatile) load from the
  // instruct's memory operand into its destination register.  All of
  // them dispatch through loadStore() so that every supported
  // addressing mode -- base, base+disp, base+index, with optional
  // sign-extension of an int index (selected via $mem->opcode()) --
  // is handled in one place.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP loads (32- and 64-bit).

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: S (32-bit), D (64-bit) and Q (128-bit) variants.

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3901 
  // Non-volatile stores.  The *0 variants store the zero register
  // directly, avoiding the need to materialize a zero constant.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero byte store preceded by a StoreStore barrier so that prior
  // stores are visible before this one.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP stores (32- and 64-bit).

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S (32-bit), D (64-bit) and Q (128-bit) variants.

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
3996 
  // volatile loads and stores

  // These all expand via MOV_VOLATILE to a single ldar/stlr-family
  // instruction (base-register addressing only).  Sub-word volatile
  // loads are zero-extending, so the signed variants append an
  // explicit sign-extension after the load.  FP volatile accesses are
  // bounced through rscratch1/rscratch2 with fmov, since there is no
  // FP-register form of ldar/stlr.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4123 
  // synchronized read/update encodings

  // Load-acquire-exclusive.  ldaxr only accepts a bare base-register
  // address, so every other addressing mode is first materialized into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + scaled index: two leas, since a single lea
        // cannot fold all three components.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4154 
  // Store-release-exclusive.  The effective address goes via rscratch2
  // when it is not a bare base register; rscratch1 receives the
  // store-exclusive status (0 on success).
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + scaled index needs two leas.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Set condition flags from the status word so the following
    // conditional branch can test success (status == 0).
    __ cmpw(rscratch1, zr);
  %}
4184 
  // 64-bit compare-and-swap: atomically replace *mem with newval iff
  // *mem == oldval, via an ldxr/stlxr retry loop.  Leaves the flags EQ
  // on success and NE on failure (paired with aarch64_enc_cset_eq to
  // produce a boolean result).  Uses a plain ldxr — no acquire
  // semantics; any required barriers are emitted separately.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // The exclusive instructions take only a base register, so fold any
    // displacement/index into addr_reg (rscratch2) up front.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxr(rscratch1, addr_reg);   // exclusive load of the current value
    __ cmp(rscratch1, old_reg);     // NE here means the compare failed ...
    __ br(Assembler::NE, done);     // ... exit with NE in the flags
    // stlxr sets rscratch1 to 0 on success, non-zero if the exclusive
    // monitor was lost — in that case redo the whole load/compare.
    __ stlxr(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}
4223 
  // 32-bit variant of aarch64_enc_cmpxchg: same ldxrw/stlxrw retry loop,
  // word-sized compare.  Flags: EQ on success, NE on failure.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Exclusives need a bare base register: resolve the full address.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxrw(rscratch1, addr_reg);  // word exclusive load
    __ cmpw(rscratch1, old_reg);    // NE => value changed, fail with NE flags
    __ br(Assembler::NE, done);
    // Status 0 == stored; non-zero => lost the exclusive monitor, retry.
    __ stlxrw(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}
4262 
4263   // variant of cmpxchg employing an acquiring load which is used by
4264   // CompareAndSwap{LNP} when we are eliding barriers
4265 
  // Acquiring variant of aarch64_enc_cmpxchg: identical except the load
  // is ldaxr (load-acquire exclusive), used when the surrounding
  // barriers are being elided.  Flags: EQ on success, NE on failure.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Exclusives need a bare base register: resolve the full address.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldaxr(rscratch1, addr_reg);  // load-ACQUIRE exclusive
    __ cmp(rscratch1, old_reg);
    __ br(Assembler::NE, done);     // mismatch: exit with NE in the flags
    __ stlxr(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load); // lost exclusivity: retry
    __ bind(done);
  %}
4304 
4305   // variant of cmpxchgw employing an acquiring load which is used by
4306   // CompareAndSwapI when we are eliding barriers
4307 
  // Acquiring 32-bit variant: as aarch64_enc_cmpxchgw but loading with
  // ldaxrw (load-acquire exclusive).  Flags: EQ on success, NE on failure.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Exclusives need a bare base register: resolve the full address.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldaxrw(rscratch1, addr_reg); // word load-ACQUIRE exclusive
    __ cmpw(rscratch1, old_reg);
    __ br(Assembler::NE, done);     // mismatch: exit with NE in the flags
    __ stlxrw(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load); // lost exclusivity: retry
    __ bind(done);
  %}
4346 
4347   // auxiliary used for CompareAndSwapX to set result register
4348   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4349     MacroAssembler _masm(&cbuf);
4350     Register res_reg = as_Register($res$$reg);
4351     __ cset(res_reg, Assembler::EQ);
4352   %}
4353 
4354   // prefetch encodings
4355 
  // Emit a PSTL1KEEP prefetch-for-store of the address denoted by mem.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      // base + immediate displacement: prfm supports this mode directly.
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): the purpose of this nop is not evident from this
      // file (possibly size padding for the pattern) — confirm before
      // removing.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // No base+disp+index addressing mode: fold the displacement first.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4375 
  // Zero cnt words starting at base.  The main loop stores 'unroll'
  // zeros per iteration; the cnt % unroll remainder is handled by
  // jumping into the middle of the unrolled store sequence on the first
  // pass (Duff's-device style, per the switch-into-do-while below).
  // Clobbers cnt_reg, base_reg, rscratch1 and rscratch2.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll); now a multiple of unroll
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Computed jump into the unrolled sequence: each str below is one
    // 4-byte instruction, so branching to (entry - rem * 4) executes
    // exactly the last 'rem' stores on the first pass.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4424 
  // mov encodings
4426 
4427   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4428     MacroAssembler _masm(&cbuf);
4429     u_int32_t con = (u_int32_t)$src$$constant;
4430     Register dst_reg = as_Register($dst$$reg);
4431     if (con == 0) {
4432       __ movw(dst_reg, zr);
4433     } else {
4434       __ movw(dst_reg, con);
4435     }
4436   %}
4437 
4438   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4439     MacroAssembler _masm(&cbuf);
4440     Register dst_reg = as_Register($dst$$reg);
4441     u_int64_t con = (u_int64_t)$src$$constant;
4442     if (con == 0) {
4443       __ mov(dst_reg, zr);
4444     } else {
4445       __ mov(dst_reg, con);
4446     }
4447   %}
4448 
  // Materialize a pointer constant into dst.  The relocation type of
  // the constant selects the encoding: oop, metadata, or a plain
  // (non-relocated) address.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and 1 are matched by the dedicated patterns
      // aarch64_enc_mov_p0 / aarch64_enc_mov_p1.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Values below the first page fit the plain immediate move.
          __ mov(dst_reg, con);
        } else {
          // Otherwise: page-relative adrp plus the low-order offset.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4473 
4474   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4475     MacroAssembler _masm(&cbuf);
4476     Register dst_reg = as_Register($dst$$reg);
4477     __ mov(dst_reg, zr);
4478   %}
4479 
4480   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4481     MacroAssembler _masm(&cbuf);
4482     Register dst_reg = as_Register($dst$$reg);
4483     __ mov(dst_reg, (u_int64_t)1);
4484   %}
4485 
4486   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
4487     MacroAssembler _masm(&cbuf);
4488     address page = (address)$src$$constant;
4489     Register dst_reg = as_Register($dst$$reg);
4490     unsigned long off;
4491     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
4492     assert(off == 0, "assumed offset == 0");
4493   %}
4494 
4495   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
4496     MacroAssembler _masm(&cbuf);
4497     address page = (address)$src$$constant;
4498     Register dst_reg = as_Register($dst$$reg);
4499     unsigned long off;
4500     __ adrp(dst_reg, ExternalAddress(page), off);
4501     assert(off == 0, "assumed offset == 0");
4502   %}
4503 
4504   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
4505     MacroAssembler _masm(&cbuf);
4506     Register dst_reg = as_Register($dst$$reg);
4507     address con = (address)$src$$constant;
4508     if (con == NULL) {
4509       ShouldNotReachHere();
4510     } else {
4511       relocInfo::relocType rtype = $src->constant_reloc();
4512       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
4513       __ set_narrow_oop(dst_reg, (jobject)con);
4514     }
4515   %}
4516 
4517   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4518     MacroAssembler _masm(&cbuf);
4519     Register dst_reg = as_Register($dst$$reg);
4520     __ mov(dst_reg, zr);
4521   %}
4522 
4523   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
4524     MacroAssembler _masm(&cbuf);
4525     Register dst_reg = as_Register($dst$$reg);
4526     address con = (address)$src$$constant;
4527     if (con == NULL) {
4528       ShouldNotReachHere();
4529     } else {
4530       relocInfo::relocType rtype = $src->constant_reloc();
4531       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
4532       __ set_narrow_klass(dst_reg, (Klass *)con);
4533     }
4534   %}
4535 
4536   // arithmetic encodings
4537 
4538   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4539     MacroAssembler _masm(&cbuf);
4540     Register dst_reg = as_Register($dst$$reg);
4541     Register src_reg = as_Register($src1$$reg);
4542     int32_t con = (int32_t)$src2$$constant;
4543     // add has primary == 0, subtract has primary == 1
4544     if ($primary) { con = -con; }
4545     if (con < 0) {
4546       __ subw(dst_reg, src_reg, -con);
4547     } else {
4548       __ addw(dst_reg, src_reg, con);
4549     }
4550   %}
4551 
4552   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4553     MacroAssembler _masm(&cbuf);
4554     Register dst_reg = as_Register($dst$$reg);
4555     Register src_reg = as_Register($src1$$reg);
4556     int32_t con = (int32_t)$src2$$constant;
4557     // add has primary == 0, subtract has primary == 1
4558     if ($primary) { con = -con; }
4559     if (con < 0) {
4560       __ sub(dst_reg, src_reg, -con);
4561     } else {
4562       __ add(dst_reg, src_reg, con);
4563     }
4564   %}
4565 
4566   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4567     MacroAssembler _masm(&cbuf);
4568    Register dst_reg = as_Register($dst$$reg);
4569    Register src1_reg = as_Register($src1$$reg);
4570    Register src2_reg = as_Register($src2$$reg);
4571     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4572   %}
4573 
4574   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4575     MacroAssembler _masm(&cbuf);
4576    Register dst_reg = as_Register($dst$$reg);
4577    Register src1_reg = as_Register($src1$$reg);
4578    Register src2_reg = as_Register($src2$$reg);
4579     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4580   %}
4581 
4582   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4583     MacroAssembler _masm(&cbuf);
4584    Register dst_reg = as_Register($dst$$reg);
4585    Register src1_reg = as_Register($src1$$reg);
4586    Register src2_reg = as_Register($src2$$reg);
4587     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4588   %}
4589 
4590   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4591     MacroAssembler _masm(&cbuf);
4592    Register dst_reg = as_Register($dst$$reg);
4593    Register src1_reg = as_Register($src1$$reg);
4594    Register src2_reg = as_Register($src2$$reg);
4595     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4596   %}
4597 
4598   // compare instruction encodings
4599 
4600   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4601     MacroAssembler _masm(&cbuf);
4602     Register reg1 = as_Register($src1$$reg);
4603     Register reg2 = as_Register($src2$$reg);
4604     __ cmpw(reg1, reg2);
4605   %}
4606 
4607   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4608     MacroAssembler _masm(&cbuf);
4609     Register reg = as_Register($src1$$reg);
4610     int32_t val = $src2$$constant;
4611     if (val >= 0) {
4612       __ subsw(zr, reg, val);
4613     } else {
4614       __ addsw(zr, reg, -val);
4615     }
4616   %}
4617 
4618   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4619     MacroAssembler _masm(&cbuf);
4620     Register reg1 = as_Register($src1$$reg);
4621     u_int32_t val = (u_int32_t)$src2$$constant;
4622     __ movw(rscratch1, val);
4623     __ cmpw(reg1, rscratch1);
4624   %}
4625 
4626   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4627     MacroAssembler _masm(&cbuf);
4628     Register reg1 = as_Register($src1$$reg);
4629     Register reg2 = as_Register($src2$$reg);
4630     __ cmp(reg1, reg2);
4631   %}
4632 
  // 64-bit compare against a 12-bit add/sub-range immediate, flags only.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // Negative and safely negatable: compare via adds with |val|.
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      // val == -val only for Long.MIN_VALUE, whose negation overflows;
      // materialize it in rscratch1 and compare register-register.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4647 
4648   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4649     MacroAssembler _masm(&cbuf);
4650     Register reg1 = as_Register($src1$$reg);
4651     u_int64_t val = (u_int64_t)$src2$$constant;
4652     __ mov(rscratch1, val);
4653     __ cmp(reg1, rscratch1);
4654   %}
4655 
4656   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4657     MacroAssembler _masm(&cbuf);
4658     Register reg1 = as_Register($src1$$reg);
4659     Register reg2 = as_Register($src2$$reg);
4660     __ cmp(reg1, reg2);
4661   %}
4662 
4663   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4664     MacroAssembler _masm(&cbuf);
4665     Register reg1 = as_Register($src1$$reg);
4666     Register reg2 = as_Register($src2$$reg);
4667     __ cmpw(reg1, reg2);
4668   %}
4669 
4670   enc_class aarch64_enc_testp(iRegP src) %{
4671     MacroAssembler _masm(&cbuf);
4672     Register reg = as_Register($src$$reg);
4673     __ cmp(reg, zr);
4674   %}
4675 
4676   enc_class aarch64_enc_testn(iRegN src) %{
4677     MacroAssembler _masm(&cbuf);
4678     Register reg = as_Register($src$$reg);
4679     __ cmpw(reg, zr);
4680   %}
4681 
4682   enc_class aarch64_enc_b(label lbl) %{
4683     MacroAssembler _masm(&cbuf);
4684     Label *L = $lbl$$label;
4685     __ b(*L);
4686   %}
4687 
4688   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4689     MacroAssembler _masm(&cbuf);
4690     Label *L = $lbl$$label;
4691     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4692   %}
4693 
4694   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4695     MacroAssembler _masm(&cbuf);
4696     Label *L = $lbl$$label;
4697     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4698   %}
4699 
  // Slow-path subtype check (is sub a subtype of super?).  Control
  // reaches the 'miss' label on failure; success falls through (the
  // NULL success label — see MacroAssembler::check_klass_subtype_slow_path).
  // When $primary is set, the result register is cleared on the
  // success path, so result == 0 then signals a hit.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4717 
  // Emit a direct Java call.  _method == NULL denotes a call to a
  // runtime wrapper; otherwise the relocation type distinguishes
  // optimized-virtual from plain static call sites.  All forms go
  // through a trampoline so the target need not be directly
  // branch-reachable.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }
    // trampoline_call returns NULL when there is no room left in the
    // code cache; record the failure and bail out.
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    if (_method) {
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
4745 
4746   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4747     MacroAssembler _masm(&cbuf);
4748     address call = __ ic_call((address)$meth$$method);
4749     if (call == NULL) {
4750       ciEnv::current()->record_failure("CodeCache is full"); 
4751       return;
4752     }
4753   %}
4754 
4755   enc_class aarch64_enc_call_epilog() %{
4756     MacroAssembler _masm(&cbuf);
4757     if (VerifyStackAtCalls) {
4758       // Check that stack depth is unchanged: find majik cookie on stack
4759       __ call_Unimplemented();
4760     }
4761   %}
4762 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a trampoline call suffices.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Native entry outside the code cache: call through blrt with the
      // argument counts/return type obtained from the node's TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4793 
4794   enc_class aarch64_enc_rethrow() %{
4795     MacroAssembler _masm(&cbuf);
4796     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
4797   %}
4798 
4799   enc_class aarch64_enc_ret() %{
4800     MacroAssembler _masm(&cbuf);
4801     __ ret(lr);
4802   %}
4803 
4804   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4805     MacroAssembler _masm(&cbuf);
4806     Register target_reg = as_Register($jump_target$$reg);
4807     __ br(target_reg);
4808   %}
4809 
4810   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4811     MacroAssembler _masm(&cbuf);
4812     Register target_reg = as_Register($jump_target$$reg);
4813     // exception oop should be in r0
4814     // ret addr has been popped into lr
4815     // callee expects it in r3
4816     __ mov(r3, lr);
4817     __ br(target_reg);
4818   %}
4819 
4820   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4821     MacroAssembler _masm(&cbuf);
4822     Register oop = as_Register($object$$reg);
4823     Register box = as_Register($box$$reg);
4824     Register disp_hdr = as_Register($tmp$$reg);
4825     Register tmp = as_Register($tmp2$$reg);
4826     Label cont;
4827     Label object_has_monitor;
4828     Label cas_failed;
4829 
4830     assert_different_registers(oop, box, tmp, disp_hdr);
4831 
4832     // Load markOop from object into displaced_header.
4833     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
4834 
4835     // Always do locking in runtime.
4836     if (EmitSync & 0x01) {
4837       __ cmp(oop, zr);
4838       return;
4839     }
4840 
4841     if (UseBiasedLocking) {
4842       __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
4843     }
4844 
4845     // Handle existing monitor
4846     if (EmitSync & 0x02) {
4847       // we can use AArch64's bit test and branch here but
4848       // markoopDesc does not define a bit index just the bit value
4849       // so assert in case the bit pos changes
4850 #     define __monitor_value_log2 1
4851       assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
4852       __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
4853 #     undef __monitor_value_log2
4854     }
4855 
4856     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
4857     __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
4858 
4859     // Load Compare Value application register.
4860 
4861     // Initialize the box. (Must happen before we update the object mark!)
4862     __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4863 
4864     // Compare object markOop with mark and if equal exchange scratch1
4865     // with object markOop.
4866     // Note that this is simply a CAS: it does not generate any
4867     // barriers.  These are separately generated by
4868     // membar_acquire_lock().
4869     {
4870       Label retry_load;
4871       __ bind(retry_load);
4872       __ ldxr(tmp, oop);
4873       __ cmp(tmp, disp_hdr);
4874       __ br(Assembler::NE, cas_failed);
4875       // use stlxr to ensure update is immediately visible
4876       __ stlxr(tmp, box, oop);
4877       __ cbzw(tmp, cont);
4878       __ b(retry_load);
4879     }
4880 
4881     // Formerly:
4882     // __ cmpxchgptr(/*oldv=*/disp_hdr,
4883     //               /*newv=*/box,
4884     //               /*addr=*/oop,
4885     //               /*tmp=*/tmp,
4886     //               cont,
4887     //               /*fail*/NULL);
4888 
4889     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4890 
4891     // If the compare-and-exchange succeeded, then we found an unlocked
4892     // object, will have now locked it will continue at label cont
4893 
4894     __ bind(cas_failed);
4895     // We did not see an unlocked object so try the fast recursive case.
4896 
4897     // Check if the owner is self by comparing the value in the
4898     // markOop of object (disp_hdr) with the stack pointer.
4899     __ mov(rscratch1, sp);
4900     __ sub(disp_hdr, disp_hdr, rscratch1);
4901     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
4902     // If condition is true we are cont and hence we can store 0 as the
4903     // displaced header in the box, which indicates that it is a recursive lock.
4904     __ ands(tmp/*==0?*/, disp_hdr, tmp);
4905     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4906 
4907     // Handle existing monitor.
4908     if ((EmitSync & 0x02) == 0) {
4909       __ b(cont);
4910 
4911       __ bind(object_has_monitor);
4912       // The object's monitor m is unlocked iff m->owner == NULL,
4913       // otherwise m->owner may contain a thread or a stack address.
4914       //
4915       // Try to CAS m->owner from NULL to current thread.
4916       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
4917       __ mov(disp_hdr, zr);
4918 
4919       {
4920         Label retry_load, fail;
4921         __ bind(retry_load);
4922         __ ldxr(rscratch1, tmp);
4923         __ cmp(disp_hdr, rscratch1);
4924         __ br(Assembler::NE, fail);
4925         // use stlxr to ensure update is immediately visible
4926         __ stlxr(rscratch1, rthread, tmp);
4927         __ cbnzw(rscratch1, retry_load);
4928         __ bind(fail);
4929       }
4930 
4931       // Label next;
4932       // __ cmpxchgptr(/*oldv=*/disp_hdr,
4933       //               /*newv=*/rthread,
4934       //               /*addr=*/tmp,
4935       //               /*tmp=*/rscratch1,
4936       //               /*succeed*/next,
4937       //               /*fail*/NULL);
4938       // __ bind(next);
4939 
4940       // store a non-null value into the box.
4941       __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4942 
4943       // PPC port checks the following invariants
4944       // #ifdef ASSERT
4945       // bne(flag, cont);
4946       // We have acquired the monitor, check some invariants.
4947       // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
4948       // Invariant 1: _recursions should be 0.
4949       // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
4950       // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
4951       //                        "monitor->_recursions should be 0", -1);
4952       // Invariant 2: OwnerIsThread shouldn't be 0.
4953       // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
4954       //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
4955       //                           "monitor->OwnerIsThread shouldn't be 0", -1);
4956       // #endif
4957     }
4958 
4959     __ bind(cont);
4960     // flag == EQ indicates success
4961     // flag == NE indicates failure
4962 
4963   %}
4964 
4965   // TODO
4966   // reimplement this with custom cmpxchgptr code
4967   // which avoids some of the unnecessary branching
  // Fast-path monitor exit.  On exit the flags encode the outcome:
  // EQ == unlocked (recursive exit, stack-lock CAS won, or quiet
  // inflated-monitor release), NE == must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking) {
      // May branch straight to cont when the lock was biased.
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);  // mark changed: NE => slow path
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);                // stored: done, flags are EQ
        __ b(retry_load);                  // lost exclusivity: retry
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      // Not owner or recursively locked: leave NE in flags => slow path.
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets the flags consumed at cont (NE == waiters present,
      // take the slow path); the cbnz does the actual branch.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5057 
5058 %}
5059 
5060 //----------FRAME--------------------------------------------------------------
5061 // Definition of frame structure and management information.
5062 //
5063 //  S T A C K   L A Y O U T    Allocators stack-slot number
5064 //                             |   (to get allocators register number
5065 //  G  Owned by    |        |  v    add OptoReg::stack0())
5066 //  r   CALLER     |        |
5067 //  o     |        +--------+      pad to even-align allocators stack-slot
5068 //  w     V        |  pad0  |        numbers; owned by CALLER
5069 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5070 //  h     ^        |   in   |  5
5071 //        |        |  args  |  4   Holes in incoming args owned by SELF
5072 //  |     |        |        |  3
5073 //  |     |        +--------+
5074 //  V     |        | old out|      Empty on Intel, window on Sparc
5075 //        |    old |preserve|      Must be even aligned.
5076 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5077 //        |        |   in   |  3   area for Intel ret address
5078 //     Owned by    |preserve|      Empty on Sparc.
5079 //       SELF      +--------+
5080 //        |        |  pad2  |  2   pad to align old SP
5081 //        |        +--------+  1
5082 //        |        | locks  |  0
5083 //        |        +--------+----> OptoReg::stack0(), even aligned
5084 //        |        |  pad1  | 11   pad to align new SP
5085 //        |        +--------+
5086 //        |        |        | 10
5087 //        |        | spills |  9   spills
5088 //        V        |        |  8   (pad0 slot for callee)
5089 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5090 //        ^        |  out   |  7
5091 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5092 //     Owned by    +--------+
5093 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5094 //        |    new |preserve|      Must be even-aligned.
5095 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5096 //        |        |        |
5097 //
5098 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5099 //         known from SELF's arguments and the Java calling convention.
5100 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
5108 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5109 //         even aligned with pad0 as needed.
5110 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5111 //           (the latter is true on Intel but is it false on AArch64?)
5112 //         region 6-11 is even aligned; it may be padded out more so that
5113 //         the region from SP to FP meets the minimum stack alignment.
5114 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5115 //         alignment.  Region 11, pad1, may be dynamically extended so that
5116 //         SP meets the minimum alignment.
5117 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 is assumed to encode SP in this port's register
  // definitions -- confirm against the register block at the top of the file.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair for each ideal register type:
    // scalar results come back in R0 (int/ptr/long/narrow) or V0 (float/double).
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad for types that occupy one slot.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5221 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
// Default attribute values; an operand/instruction that does not override
// op_cost/ins_cost gets the value declared here.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// NOTE(review): INSN_COST is assumed to be defined earlier in this file.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5239 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate -- any int constant
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4.  Note there is no lower bound,
// so negative constants also match.
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The following operands each match one specific bit-count constant.
// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5400 
// NOTE(review): despite the "immL" names, the next two operands match a
// 32-bit ConI node and read get_int(), not get_long()/ConL.  Presumably
// they are used where a long-typed instruction takes an int-typed
// constant (e.g. a shift count) -- confirm against their uses before
// renaming them or changing them to match(ConL).
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255; see the NOTE above -- matches ConI, not ConL.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5420 
// 64 bit constant 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits: value+1 must be a power
// of two and the top two bits must be clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits (same shape as immL_bitmask).
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5462 
// Scale values for scaled offset addressing modes (up to long but not quad)
// i.e. shift amounts 0..3 for byte/half/word/doubleword element scaling.
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variant of immIOffset
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5537 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5559 
// Integer operands 64 bit
// 64 bit immediate -- any long constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (matches the long constant equal to the byte offset of last_Java_pc
// within the thread's frame anchor)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5645 %}
5646 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// (matches only the address of the VM's safepoint polling page)
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5728 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (checked via the bit pattern, so -0.0d does not match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value representable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (checked via the bit pattern, so -0.0f does not match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value representable as a packed FP immediate.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5789 
// Narrow pointer operands
// Narrow Pointer Immediate -- any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate -- any compressed-klass constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5820 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5842 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
// Also matches the no-special-register subset so either form can be used.
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5854 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // Explicit zero cost: without this line the ADLC default from
  // "op_attrib op_cost(1)" applies, making this the only register
  // operand in the file with cost 1 instead of 0.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5863 
// Pointer Register Operands
// Pointer Register
// Matches the fixed-register pointer variants as well, so rules written
// against iRegP accept values pinned to specific registers.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5908 
// The following operands pin a pointer value to one specific register;
// they are typically used to satisfy fixed-register calling conventions.
// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5980 
// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6002 
// 32-bit integer values pinned to one specific register.
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6047 
6048 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6081 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6125 
// Double values pinned to one specific FP/SIMD register (V0..V3).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6161 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (allocates from the same int_flags class as rFlagsReg; only the
// format string differs)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6201 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link (return address) Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6243 
6244 //----------Memory Operands----------------------------------------------------
6245 
// Memory operand: simple register-indirect addressing, [reg].
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // 0xffffffff == no index register
    scale(0x0);
    disp(0x0);
  %}
%}
6259 
// Memory operand: base + (long index << scale) + unsigned 12-bit int offset.
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Memory operand: base + (long index << scale) + unsigned 12-bit long offset.
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
6287 
// Memory operand: base + sign-extended int index + unsigned 12-bit offset.
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// Memory operand: base + (sign-extended int index << scale) + 12-bit offset.
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
6315 
// Memory operand: base + (sign-extended int index << scale), no displacement.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Memory operand: base + (long index << scale), no displacement.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6343 
// Memory operand: base + long index, unscaled, no displacement.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6357 
// Memory operand: base + int immediate offset, no index.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

// Memory operand: base + long immediate offset, no index.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6385 
6386 
// Narrow-oop variant of indirect: the base is a compressed oop that is
// implicitly decoded. Only legal when no decode shift is needed.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp(0x0);
  %}
%}
6401 
// Narrow-oop twin of indIndexScaledOffsetI (decoded base + scaled index + offset).
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0); // NOTE(review): twin operands (plain and LN below) use INSN_COST — confirm 0 is intended
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow-oop twin of indIndexScaledOffsetL.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
6431 
// Narrow-oop twin of indIndexOffsetI2L.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop twin of indIndexScaledOffsetI2L.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
6461 
// Narrow-oop twin of indIndexScaledI2L.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow-oop twin of indIndexScaled.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6491 
// Narrow-oop twin of indIndex.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop twin of indOffI.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop twin of indOffL.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6536 
6537 
6538 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// Memory operand: thread register + fixed pc-slot offset.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6553 
6554 //----------Special Memory Operands--------------------------------------------
6555 // Stack Slot Operand - This operand is used for loading and storing temporary
6556 //                      values on the stack where a match requires a value to
6557 //                      flow through memory.
// Stack-slot operand for pointer spills: addressed as [SP + slot offset].
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6572 
// Stack-slot operand for int spills.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack-slot operand for float spills.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6600 
// Stack-slot operand for double spills.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack-slot operand for long spills.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6628 
6629 // Operands for expressing Control Flow
6630 // NOTE: Label is a predefined operand which should not be redefined in
6631 //       the AD file. It is generically handled within the ADLC.
6632 
6633 //----------Conditional Branch Operands----------------------------------------
6634 // Comparison Op  - This is the operation of the comparison, and is limited to
6635 //                  the following set of codes:
6636 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6637 //
6638 // Other attributes of the comparison, such as unsignedness, are specified
6639 // by the comparison instruction that sets a condition code flags register.
6640 // That result is represented by a flags operand whose subtype is appropriate
6641 // to the unsignedness (etc.) of the comparison.
6642 //
6643 // Later, the instruction which matches both the Comparison Op (a Bool) and
6644 // the flags (produced by the Cmp) specifies the coding of the comparison op
6645 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6646 
6647 // used for signed integral comparisons and fp comparisons
6648 
// Condition operand for signed comparisons; the hex values are the
// AArch64 condition-code encodings matching the listed mnemonics.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6665 
6666 // used for unsigned integral comparisons
6667 
// Condition operand for unsigned comparisons; uses the unsigned
// condition codes lo/hs/ls/hi instead of lt/ge/le/gt.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6684 
6685 // Special operand allowing long args to int ops to be truncated for free
6686 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)  // NOTE(review): no trailing ';' unlike other operands — ADLC accepts it, but consider normalizing
%}

// Operand class covering the simple addressing modes only
// (base, base+index, base+imm offset).
opclass vmem(indirect, indIndex, indOffI, indOffL);
6699 
6700 //----------OPERAND CLASSES----------------------------------------------------
6701 // Operand Classes are groups of operands that are used as to simplify
6702 // instruction definitions by not requiring the AD writer to specify
6703 // separate instructions for every form of operand when the
6704 // instruction accepts multiple operand types with the same basic
6705 // encoding and format. The classic case of this is memory operands.
6706 
6707 // memory is used to define read/write location for load/store
6708 // instruction defs. we can turn a memory op into an Address
6709 
// First row: operands with a plain oop/pointer base;
// second row: the corresponding narrow-oop (DecodeN base) variants.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6712 
6713 
6714 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6715 // operations. it allows the src to be either an iRegI or a (ConvL2I
6716 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6717 // can be elided because the 32-bit instruction will just employ the
6718 // lower 32 bits anyway.
6719 //
6720 // n.b. this does not elide all L2I conversions. if the truncated
6721 // value is consumed by more than one operation then the ConvL2I
6722 // cannot be bundled into the consuming nodes so an l2i gets planted
6723 // (actually a movw $dst $src) and the downstream instructions consume
6724 // the result of the l2i as an iRegI input. That's a shame since the
6725 // movw is actually redundant but its not too costly.
6726 
6727 opclass iRegIorL2I(iRegI, iRegL2I);
6728 
6729 //----------PIPELINE-----------------------------------------------------------
6730 // Rules which define the behavior of the target architectures pipeline.
6731 // Integer ALU reg operation
6732 pipeline %{
6733 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions; TODO(review): original comment truncated here ("TODO does")
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6746 
6747 // We don't use an actual pipeline model so don't care about resources
6748 // or description. we do use pipeline classes to introduce fixed
6749 // latencies
6750 
6751 //----------RESOURCES----------------------------------------------------------
6752 // Resources are the functional units available to the machine
6753 
// Functional units: two issue slots (INS0/INS1, INS01 = either),
// two ALUs, a multiply-accumulate unit, divider, branch unit,
// load/store unit and NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Stages: issue, execute 1, execute 2, write-back.
pipe_desc(ISS, EX1, EX2, WR);
6766 
6767 //----------PIPELINE CLASSES---------------------------------------------------
6768 // Pipeline Classes describe the stages in which input and output are
6769 // referenced by the hardware pipeline.
6770 
6771 //------- Integer ALU operations --------------------------
6772 
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand needed earlier, at issue
  INS01  : ISS;
  ALU    : EX2;
%}
6798 
// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read); // shifted operand needed at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}
6836 
// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6868 
6869 //------- Compare operation -------------------------------
6870 
// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm (comment previously said reg-reg)
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6895 
6896 //------- Conditional instructions ------------------------
6897 
// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (comment previously duplicated the 2-operand header)
// Eg.  CSINC   x0, x1, x1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6933 
6934 //------- Multiply pipeline operations --------------------
6935 
// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2 (example previously showed w registers)
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6986 
6987 //------- Divide pipeline operations --------------------
6988 
// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7012 
7013 //------- Load pipeline operations ------------------------
7014 
// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7046 
7047 //------- Store pipeline operations -----------------------
7048 
// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read); // "dst" here is the address register of the store — read, not written
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7080 
//------- Branch pipeline operations ----------------------
// (header previously duplicated the "Store pipeline operations" banner)

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7109 
7110 //------- Synchronisation operations ----------------------
7111 
// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7133 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7173 
7174 %}
7175 //----------INSTRUCTIONS-------------------------------------------------------
7176 //
7177 // match      -- States which machine-independent subtree may be replaced
7178 //               by this instruction.
7179 // ins_cost   -- The estimated cost of this instruction is used by instruction
7180 //               selection to identify a minimum cost tree of machine
7181 //               instructions that matches a tree of machine-independent
7182 //               instructions.
7183 // format     -- A string providing the disassembly for this instruction.
7184 //               The value of an instruction's operand may be inserted
7185 //               by referring to it with a '$' prefix.
7186 // opcode     -- Three instruction opcodes may be provided.  These are referred
7187 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7189 //               indicate the type of machine instruction, while secondary
7190 //               and tertiary are often used for prefix options or addressing
7191 //               modes.
7192 // ins_encode -- A list of encode classes with parameters. The encode class
7193 //               name must have been defined in an 'enc_class' specification
7194 //               in the encode section of the architecture description.
7195 
7196 // ============================================================================
7197 // Memory (Load/Store) Instructions
7198 
7199 // Load Instructions
7200 
// Load Byte (8 bit signed)
// Plain load only: the predicate excludes loads that need acquire semantics.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// Matches the combined load+ConvI2L so a single ldrsb suffices.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7256 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7312 
7313 // Load Integer (32 bit signed)
7314 instruct loadI(iRegINoSp dst, memory mem)
7315 %{
7316   match(Set dst (LoadI mem));
7317   predicate(!needs_acquiring_load(n));
7318 
7319   ins_cost(4 * INSN_COST);
7320   format %{ "ldrw  $dst, $mem\t# int" %}
7321 
7322   ins_encode(aarch64_enc_ldrw(dst, mem));
7323 
7324   ins_pipe(iload_reg_mem);
7325 %}
7326 
7327 // Load Integer (32 bit signed) into long
7328 instruct loadI2L(iRegLNoSp dst, memory mem)
7329 %{
7330   match(Set dst (ConvI2L (LoadI mem)));
7331   predicate(!needs_acquiring_load(n->in(1)));
7332 
7333   ins_cost(4 * INSN_COST);
7334   format %{ "ldrsw  $dst, $mem\t# int" %}
7335 
7336   ins_encode(aarch64_enc_ldrsw(dst, mem));
7337 
7338   ins_pipe(iload_reg_mem);
7339 %}
7340 
7341 // Load Integer (32 bit unsigned) into long
7342 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
7343 %{
7344   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7345   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7346 
7347   ins_cost(4 * INSN_COST);
7348   format %{ "ldrw  $dst, $mem\t# int" %}
7349 
7350   ins_encode(aarch64_enc_ldrw(dst, mem));
7351 
7352   ins_pipe(iload_reg_mem);
7353 %}
7354 
7355 // Load Long (64 bit signed)
7356 instruct loadL(iRegLNoSp dst, memory mem)
7357 %{
7358   match(Set dst (LoadL mem));
7359   predicate(!needs_acquiring_load(n));
7360 
7361   ins_cost(4 * INSN_COST);
7362   format %{ "ldr  $dst, $mem\t# int" %}
7363 
7364   ins_encode(aarch64_enc_ldr(dst, mem));
7365 
7366   ins_pipe(iload_reg_mem);
7367 %}
7368 
7369 // Load Range
7370 instruct loadRange(iRegINoSp dst, memory mem)
7371 %{
7372   match(Set dst (LoadRange mem));
7373 
7374   ins_cost(4 * INSN_COST);
7375   format %{ "ldrw  $dst, $mem\t# range" %}
7376 
7377   ins_encode(aarch64_enc_ldrw(dst, mem));
7378 
7379   ins_pipe(iload_reg_mem);
7380 %}
7381 
7382 // Load Pointer
7383 instruct loadP(iRegPNoSp dst, memory mem)
7384 %{
7385   match(Set dst (LoadP mem));
7386   predicate(!needs_acquiring_load(n));
7387 
7388   ins_cost(4 * INSN_COST);
7389   format %{ "ldr  $dst, $mem\t# ptr" %}
7390 
7391   ins_encode(aarch64_enc_ldr(dst, mem));
7392 
7393   ins_pipe(iload_reg_mem);
7394 %}
7395 
7396 // Load Compressed Pointer
7397 instruct loadN(iRegNNoSp dst, memory mem)
7398 %{
7399   match(Set dst (LoadN mem));
7400   predicate(!needs_acquiring_load(n));
7401 
7402   ins_cost(4 * INSN_COST);
7403   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7404 
7405   ins_encode(aarch64_enc_ldrw(dst, mem));
7406 
7407   ins_pipe(iload_reg_mem);
7408 %}
7409 
7410 // Load Klass Pointer
7411 instruct loadKlass(iRegPNoSp dst, memory mem)
7412 %{
7413   match(Set dst (LoadKlass mem));
7414   predicate(!needs_acquiring_load(n));
7415 
7416   ins_cost(4 * INSN_COST);
7417   format %{ "ldr  $dst, $mem\t# class" %}
7418 
7419   ins_encode(aarch64_enc_ldr(dst, mem));
7420 
7421   ins_pipe(iload_reg_mem);
7422 %}
7423 
7424 // Load Narrow Klass Pointer
7425 instruct loadNKlass(iRegNNoSp dst, memory mem)
7426 %{
7427   match(Set dst (LoadNKlass mem));
7428   predicate(!needs_acquiring_load(n));
7429 
7430   ins_cost(4 * INSN_COST);
7431   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7432 
7433   ins_encode(aarch64_enc_ldrw(dst, mem));
7434 
7435   ins_pipe(iload_reg_mem);
7436 %}
7437 
7438 // Load Float
7439 instruct loadF(vRegF dst, memory mem)
7440 %{
7441   match(Set dst (LoadF mem));
7442   predicate(!needs_acquiring_load(n));
7443 
7444   ins_cost(4 * INSN_COST);
7445   format %{ "ldrs  $dst, $mem\t# float" %}
7446 
7447   ins_encode( aarch64_enc_ldrs(dst, mem) );
7448 
7449   ins_pipe(pipe_class_memory);
7450 %}
7451 
7452 // Load Double
7453 instruct loadD(vRegD dst, memory mem)
7454 %{
7455   match(Set dst (LoadD mem));
7456   predicate(!needs_acquiring_load(n));
7457 
7458   ins_cost(4 * INSN_COST);
7459   format %{ "ldrd  $dst, $mem\t# double" %}
7460 
7461   ins_encode( aarch64_enc_ldrd(dst, mem) );
7462 
7463   ins_pipe(pipe_class_memory);
7464 %}
7465 
7466 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// (costed at 4 insns: materializing an arbitrary 64-bit pointer may need a
// movz/movk sequence)

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7522 
7523 // Load Pointer Constant One
7524 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // The operand is immP_1 (pointer constant one), not null; the previous
  // format text "# NULL ptr" was a copy-paste from loadConP0.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7536 
// Load Poll Page Constant

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// (immFPacked constants can be encoded directly in an fmov immediate)

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant
// (general float constants come from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}
7650 
7651 // Load Double Constant
7652 
// General double constants come from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Format now says "double=$con"; it previously said "float=$con", a
  // copy-paste from loadConF that mislabelled the disassembly.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
7667 
// Store Instructions

// Store CMS card-mark Immediate
// (only selected when the StoreStore barrier is provably unnecessary;
// otherwise the _ordered variant below is used)
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// (plain stores are guarded by !needs_releasing_store so that releasing
// stores fall through to the stlr-based volatile rules below)
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7712 
7713 
// Store zero Byte.  aarch64_enc_strb0 stores the always-zero register zr,
// so the format now reports "zr"; it previously printed the misspelt (and
// wrong) register name "rscractch2".
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7726 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short (uses zr, avoiding a register to hold zero)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed null by reusing rheapbase, which is known to hold zero
// when both the oop and klass encoding bases are NULL.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7928 
//  ---------------- volatile loads and stores ----------------
//
// These rules use load-acquire (ldar*) forms and only support the simple
// indirect addressing mode, hence the indirect operand in place of memory.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8020 
// Load Short (16 bit signed) into long
// (header previously said "Short/Char"; char is unsigned and is handled by
// loadUS2L_volatile above — only LoadS is matched here)
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // The encoding emits the sign-extending acquire load ldarsh; the format
  // previously claimed the zero-extending ldarh, mislabelling disassembly.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8033 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// (ldarw zero-extends, so the 0xFFFFFFFF AndL mask is folded away)
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Store Byte
// (volatile stores use the store-release stlr* forms)
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8232 
8233 //  ---------------- end of volatile loads and stores ----------------
8234 
8235 // ============================================================================
8236 // BSWAP Instructions
8237 
// Byte-reverse a 32-bit value.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a 64-bit value.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse an unsigned 16-bit value (char).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a signed 16-bit value: rev16w swaps the bytes, then sbfmw
// sign-extends the low 16 bits into the full register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}

// ============================================================================
// Zero Count Instructions

instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Trailing zeros: AArch64 has no ctz, so reverse the bits then count
// leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8346 
8347 //---------- Population Count Instructions -------------------------------------
8348 //
8349 
// Population count via the SIMD cnt instruction: move the value into a
// vector register, count bits per byte (cnt), sum the bytes (addv), and
// move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load directly into the vector register with ldrs.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load directly into the vector register with ldrd.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8436 
8437 // ============================================================================
8438 // MemBar Instruction
8439 
// LoadFence: orders earlier loads before later loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided acquire barrier: the preceding ldar already provides the
// required ordering (see unnecessary_acquire predicate).
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders earlier loads and stores before later stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided release barrier: the following stlr already provides the
// required ordering (see unnecessary_release predicate).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8543 
8544 instruct membar_release_lock() %{
8545   match(MemBarReleaseLock);
8546   ins_cost(VOLATILE_REF_COST);
8547 
8548   format %{ "membar_release_lock" %}
8549 
8550   ins_encode %{
8551     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8552   %}
8553 
8554   ins_pipe(pipe_serial);
8555 %}
8556 
8557 instruct unnecessary_membar_volatile() %{
8558   predicate(unnecessary_volatile(n));
8559   match(MemBarVolatile);
8560   ins_cost(0);
8561 
8562   format %{ "membar_volatile (elided)" %}
8563 
8564   ins_encode %{
8565     __ block_comment("membar_volatile (elided)");
8566   %}
8567 
8568   ins_pipe(pipe_serial);
8569 %}
8570 
8571 instruct membar_volatile() %{
8572   match(MemBarVolatile);
8573   ins_cost(VOLATILE_REF_COST*100);
8574 
8575   format %{ "membar_volatile" %}
8576 
8577   ins_encode %{
8578     __ block_comment("membar_volatile");
8579     __ membar(Assembler::StoreLoad);
8580   %}
8581 
8582   ins_pipe(pipe_serial);
8583 %}
8584 
8585 // ============================================================================
8586 // Cast/Convert Instructions
8587 
// Reinterpret a long as a pointer; a plain register move, elided when
// source and destination were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; likewise a (possibly elided) move.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// (only the low 32 bits are needed, hence the 32-bit movw).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8630 
8631 // Convert compressed oop into int for vectors alignment masking
8632 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when narrow oops are stored unshifted (heap < 4Gb): the
  // compressed bits are then exactly the low 32 bits of the address, so
  // a 32-bit register move is the whole conversion.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: was "mov dst, $src" -- the missing '$' made
  // PrintAssembly emit the literal text "dst" instead of the allocated
  // destination register, and "mov" misnamed the movw the encoding
  // actually emits.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8646 
8647 
8648 // Convert oop pointer into compressed form
// Compress an oop that may be null; the general encode_heap_oop path
// clobbers the flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop known to be non-null. NOTE(review): cr is declared
// as an operand but there is no KILL effect -- presumably
// encode_heap_oop_not_null leaves the flags intact; confirm against
// MacroAssembler before relying on flags across this node.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8701 
8702 // n.b. AArch64 implementations of encode_klass_not_null and
8703 // decode_klass_not_null do not modify the flags register so, unlike
8704 // Intel, we don't kill CR as a side effect here
8705 
// Compress a klass pointer. Unlike the heap-oop rules above there is
// no KILL cr: per the preceding comment the AArch64 implementations
// leave the flags register untouched.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a klass pointer; flags preserved (no KILL cr).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Use the single-register (in-place) variant when dst aliases src.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8739 
// checkCastPP, castPP and castII exist only to satisfy the ideal
// graph: they retype a value in place and emit no code at all
// (size 0, empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8770 
8771 // ============================================================================
8772 // Atomic operation instructions
8773 //
8774 // Intel and SPARC both implement Ideal Node LoadPLocked and
8775 // Store{PIL}Conditional instructions using a normal load for the
8776 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8777 //
8778 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8779 // pair to lock object allocations from Eden space when not using
8780 // TLABs.
8781 //
8782 // There does not appear to be a Load{IL}Locked Ideal Node and the
8783 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8784 // and to use StoreIConditional only for 32-bit and StoreLConditional
8785 // only for 64-bit.
8786 //
8787 // We implement LoadPLocked and StorePLocked instructions using,
8788 // respectively the AArch64 hw load-exclusive and store-conditional
8789 // instructions. Whereas we must implement each of
8790 // Store{IL}Conditional using a CAS which employs a pair of
8791 // instructions comprising a load-exclusive followed by a
8792 // store-conditional.
8793 
8794 
8795 // Locked-load (linked load) of the current heap-top
8796 // used when updating the eden heap top
8797 // implemented using ldaxr on AArch64
8798 
// Linked load of the heap top: ldaxr performs an acquiring
// load-exclusive, pairing with the stlxr in storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8811 
8812 // Conditional-store of the updated heap-top.
8813 // Used during allocation of the shared heap.
8814 // Sets flag (EQ) on success.
8815 // implemented using stlxr on AArch64.
8816 
// Conditional store of the updated heap top via stlxr; sets EQ in cr
// on success. NOTE(review): oldval appears in the match rule but the
// encoding uses only newval and heap_top_ptr -- presumably the
// exclusive monitor armed by the preceding loadPLocked carries the
// old-value check; confirm.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}

// StoreLConditional has no linked load to pair with, so it must be
// implemented as a full CAS (load-exclusive + store-conditional loop).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// 32-bit flavour of the above, likewise implemented as a CAS.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8870 
8871 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8872 // can't match them
8873 
8874 // standard CompareAndSwapX when we are using barriers
8875 
// CompareAndSwapI with explicit barriers: CAS on the word, then cset
// turns the EQ result into a 0/1 in res. The predicate excludes nodes
// that need an acquiring load-exclusive (those match the *Acq rules).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(!needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// 64-bit CAS, same selection rule as above.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(!needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS (64-bit).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(!needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS (32-bit compressed pointer).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(!needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8951 
8952 // alternative CompareAndSwapX when we are eliding barriers
8953 
// Acquiring CAS variants. These deliberately carry no predicate: they
// can always match, but at 2 * VOLATILE_REF_COST they lose to the
// cheaper plain rules above whenever those rules' predicate
// (!needs_acquiring_load_exclusive) holds; when it does not, only
// these rules match and the _acq encoding supplies the ordering.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9025 
9026 
// Atomic exchange: prev <- [mem]; [mem] <- newv. The w-suffixed
// MacroAssembler helpers operate on 32 bits (I and narrow-oop N),
// the unsuffixed ones on 64 bits (L and P).
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9062 
9063 
// Atomic fetch-and-add family. Four axes: 64-bit (L) vs 32-bit (I),
// register vs add/sub-encodable immediate increment, and whether the
// old value is used. The *_no_res variants match only when the ideal
// node's result is unused (predicate result_not_used) and pass noreg
// so no result register is written; their slightly lower cost makes
// the matcher prefer them in that case.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9147 
9148 // Manifest a CmpL result in an integer register.
9149 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Manifest a CmpL result in an integer register:
//   dst = (src1 < src2) ? -1 : (src1 > src2) ? 1 : 0
// cmp sets the flags, csetw yields 0/1 for "not equal", and cnegw
// negates that when the signed comparison was "less than".
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Same as above with an add/sub-encodable immediate. A negative
// constant is handled by adding its negation, so the flags are still
// set as for src1 - con. NOTE(review): assumes immLAddSub excludes
// values whose negation overflows int32_t -- confirm the operand
// definition.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9195 
9196 // ============================================================================
9197 // Conditional Move Instructions
9198 
9199 // n.b. we have identical rules for both a signed compare op (cmpOp)
9200 // and an unsigned compare op (cmpOpU). it would be nice if we could
9201 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9208 
// Conditional move, int. Note the operand order: CMoveI yields src2
// when the condition holds and src1 otherwise, and csel selects its
// first source on a true condition -- hence src2 comes first here.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (see the section comment
// for why signed and unsigned need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// dst = cond ? src : 0, using the hardwired zero register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9313 
9314 // special case for creating a boolean 0 or 1
9315 
9316 // n.b. this is selected in preference to the rule above because it
9317 // avoids loading constants 0 and 1 into a source register
9318 
// Materialize a boolean without loading constants:
// csincw dst, zr, zr, cond computes cond ? 0 : 0+1, i.e.
// dst = cond ? 0 : 1, which matches CMoveI(cmp, (Binary one zero)).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9356 
// Conditional move, long: same operand-swap scheme as the int rules
// (csel picks src2 when the condition holds), using 64-bit csel.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = cond ? 0 : src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9454 
// Conditional move of a pointer: dst = $cmp ? $src2 : $src1 (64-bit csel),
// signed comparison flags.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As cmovP_reg_reg but matching unsigned comparison flags (cmpOpU/rFlagsRegU).
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Pointer cmov whose taken arm is the zero register (null), signed flags.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    // dst = $cmp ? zr : $src
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovP_reg_zero but for unsigned comparison flags.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    // dst = $cmp ? zr : $src
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case: zero is the other arm, so csel's operand order is swapped.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    // dst = $cmp ? $src : zr
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovP_zero_reg but for unsigned comparison flags.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    // dst = $cmp ? $src : zr
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9552 
// Conditional move of a compressed (narrow) oop: uses the 32-bit cselw,
// dst = $cmp ? $src2 : $src1, signed comparison flags.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9568 
// Conditional move of a compressed (narrow) oop for unsigned comparison
// flags (cmpOpU/rFlagsRegU): dst = $cmp ? $src2 : $src1 via 32-bit cselw.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // Fixed: this is the unsigned variant, but the disassembly note said
  // "signed" (copy-paste from cmovN_reg_reg).
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9584 
// special cases where one arg is zero

// Narrow-oop cmov whose taken arm is the zero register, signed flags.
// cselw dst, a, b, cond computes dst = cond ? a : b (32-bit).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    // dst = $cmp ? zr : $src
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovN_reg_zero but for unsigned comparison flags.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    // dst = $cmp ? zr : $src
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case: zero is the other arm, so cselw's operand order is swapped.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    // dst = $cmp ? $src : zr
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovN_zero_reg but for unsigned comparison flags.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    // dst = $cmp ? $src : zr
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9650 
// Conditional move of a float: dst = $cmp ? $src2 : $src1 via fcsels
// (single-precision FP conditional select), signed comparison flags.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed format: the encoding passes $src2 before $src1 (same convention
  // as the integer cmov*_reg_reg rules), so print the operands in the
  // order actually emitted.
  format %{ "fcsels $dst, $src2, $src1, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // dst = cond ? src2 : src1
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9668 
// As cmovF_reg but matching unsigned comparison flags (cmpOpU/rFlagsRegU).
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed format: operands printed in the order the encoding emits them
  // ($src2 first), matching the integer cmov*_reg_reg rules.
  format %{ "fcsels $dst, $src2, $src1, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // dst = cond ? src2 : src1
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9686 
// Conditional move of a double: dst = $cmp ? $src2 : $src1 via fcseld
// (double-precision FP conditional select), signed comparison flags.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed format: the note said "cmove float" for this double rule, and the
  // operands are now printed in the order the encoding emits ($src2 first).
  format %{ "fcseld $dst, $src2, $src1, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // dst = cond ? src2 : src1
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9704 
// As cmovD_reg but matching unsigned comparison flags (cmpOpU/rFlagsRegU).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed format: the note said "cmove float" for this double rule, and the
  // operands are now printed in the order the encoding emits ($src2 first).
  format %{ "fcseld $dst, $src2, $src1, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // dst = cond ? src2 : src1
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9722 
9723 // ============================================================================
9724 // Arithmetic Instructions
9725 //
9726 
9727 // Integer Addition
9728 
9729 // TODO
9730 // these currently employ operations which do not set CR and hence are
9731 // not flagged as killing CR but we would like to isolate the cases
9732 // where we want to set flags from those where we don't. need to work
9733 // out how to do that.
9734 
9735 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9736   match(Set dst (AddI src1 src2));
9737 
9738   ins_cost(INSN_COST);
9739   format %{ "addw  $dst, $src1, $src2" %}
9740 
9741   ins_encode %{
9742     __ addw(as_Register($dst$$reg),
9743             as_Register($src1$$reg),
9744             as_Register($src2$$reg));
9745   %}
9746 
9747   ins_pipe(ialu_reg_reg);
9748 %}
9749 
9750 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
9751   match(Set dst (AddI src1 src2));
9752 
9753   ins_cost(INSN_COST);
9754   format %{ "addw $dst, $src1, $src2" %}
9755 
9756   // use opcode to indicate that this is an add not a sub
9757   opcode(0x0);
9758 
9759   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9760 
9761   ins_pipe(ialu_reg_imm);
9762 %}
9763 
9764 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
9765   match(Set dst (AddI (ConvL2I src1) src2));
9766 
9767   ins_cost(INSN_COST);
9768   format %{ "addw $dst, $src1, $src2" %}
9769 
9770   // use opcode to indicate that this is an add not a sub
9771   opcode(0x0);
9772 
9773   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9774 
9775   ins_pipe(ialu_reg_imm);
9776 %}
9777 
// Pointer Addition
// 64-bit add of a long offset to a pointer base: dst = src1 + src2.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus sign-extended int offset: the ConvI2L is folded into the
// add's sxtw extended-register form, so it costs no extra instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus scaled long index: folded into a single address computation
// (lea with an lsl-scaled register offset).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus scaled, sign-extended int index: both the ConvI2L and the
// shift are folded into the address mode (sxtw extend with scale).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (long)(int)src << scale in one instruction: sbfiz inserts the
// sign-extended low bits of $src at bit position (scale & 63).  The field
// width is min(32, 64 - scale) since at most 32 source bits are significant.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9853 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
// dst = src1 + add/sub-encodable immediate; shared encoder selects
// add vs sub via opcode.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
// 64-bit register-register add: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9887 
// Long Immediate Addition. No constant pool entries required.
// 64-bit add of an add/sub-encodable immediate; shared encoder selects
// add vs sub via opcode.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9902 
// Integer Subtraction
// 32-bit register-register subtract: dst = src1 - src2 (subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit subtract of an add/sub-encodable immediate; shared encoder
// selects add vs sub via opcode.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit register-register subtract: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9950 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtract of an add/sub-encodable immediate; shared encoder
// selects add vs sub via opcode.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: a space was missing between the mnemonic and $dst
  // ("sub$dst"), which garbled the PrintOptoAssembly output.
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9965 
// Integer Negation (special case for sub)

// dst = 0 - src (32-bit negw), matched from SubI with a zero left operand.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// dst = 0 - src (64-bit neg), matched from SubL with a zero left operand.
// NOTE(review): src is declared iRegIorL2I although this is a 64-bit neg;
// looks like it should be iRegL -- confirm against the matcher's use.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9997 
// Integer Multiply

// 32-bit multiply: dst = src1 * src2 (mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening signed multiply: (long)src1 * (long)src2 of two int sources in a
// single smull (32x32 -> 64 bit).
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply: dst = src1 * src2 (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High half of the 128-bit signed product of two longs (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10062 
10063 // Combined Integer Multiply & Add/Sub
10064 
// 32-bit multiply-add: dst = src3 + src1 * src2 in one maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format: the encoding emits the 32-bit maddw, not the 64-bit madd
  // (cf. maddL, which really does emit madd).
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10080 
// 32-bit multiply-subtract: dst = src3 - src1 * src2 in one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format: the encoding emits the 32-bit msubw, not the 64-bit msub
  // (cf. msubL, which really does emit msub).
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10096 
// Combined Long Multiply & Add/Sub

// 64-bit multiply-add: dst = src3 + src1 * src2 in one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// 64-bit multiply-subtract: dst = src3 - src1 * src2 in one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10130 
// Integer Divide

// 32-bit signed divide via the shared encoder (sdivw).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src1 >> 31) >>> 31 extracts the sign bit (0 or 1); a single logical
// shift right by 31 computes the same value.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit(src) in one shifted-operand addw: src + (src >>> 31).
// This is the rounding adjustment for a signed divide by a power of two.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

// 64-bit signed divide via the shared encoder (sdiv).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit twin of signExtract: (src1 >> 63) >>> 63 == src1 >>> 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10188 
// 64-bit twin of div2Round: src + sign-bit(src) via a shifted-operand add,
// i.e. src + (src >>> 63).
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format: show the LSR shifted-operand form actually emitted,
  // matching the 32-bit div2Round rule.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10202 
10203 // Integer Remainder
10204 
// 32-bit signed remainder: divide then multiply-subtract back
// (dst = src1 - (src1 / src2) * src2), emitted by the shared encoder.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format: removed the stray "(" after msubw.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10215 
10216 // Long Remainder
10217 
// 64-bit signed remainder: divide then multiply-subtract back
// (dst = src1 - (src1 / src2) * src2), emitted by the shared encoder.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format: removed the stray "(" after msub and added the "\t" after
  // "\n" so the second line is indented like other multi-line formats.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10228 
// Integer Shifts

// Shift Left Register
// Variable 32-bit left shift: the shift amount is taken from a register
// (lslvw uses the amount modulo 32 per the ISA).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Constant 32-bit left shift; the amount is masked to 0..31 to match
// Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// Variable 32-bit unsigned right shift (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Constant 32-bit unsigned right shift; amount masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// Variable 32-bit signed right shift (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Constant 32-bit signed right shift; amount masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10326 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts

// Shift Left Register
// Variable 64-bit left shift: the shift amount is taken from a register
// (lslv uses the amount modulo 64 per the ISA).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Constant 64-bit left shift; the amount is masked to 0..63 to match
// Java shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// Variable 64-bit unsigned right shift (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Constant 64-bit unsigned right shift; amount masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Unsigned right shift applied directly to a pointer reinterpreted as a
// long (CastP2X); the raw bits are shifted, amount masked to 0..63.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// Variable 64-bit signed right shift (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Constant 64-bit signed right shift; amount masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10443 
10444 // BEGIN This section of the file is automatically generated. Do not edit --------------
10445 
10446 instruct regL_not_reg(iRegLNoSp dst,
10447                          iRegL src1, immL_M1 m1,
10448                          rFlagsReg cr) %{
10449   match(Set dst (XorL src1 m1));
10450   ins_cost(INSN_COST);
10451   format %{ "eon  $dst, $src1, zr" %}
10452 
10453   ins_encode %{
10454     __ eon(as_Register($dst$$reg),
10455               as_Register($src1$$reg),
10456               zr,
10457               Assembler::LSL, 0);
10458   %}
10459 
10460   ins_pipe(ialu_reg);
10461 %}
10462 instruct regI_not_reg(iRegINoSp dst,
10463                          iRegIorL2I src1, immI_M1 m1,
10464                          rFlagsReg cr) %{
10465   match(Set dst (XorI src1 m1));
10466   ins_cost(INSN_COST);
10467   format %{ "eonw  $dst, $src1, zr" %}
10468 
10469   ins_encode %{
10470     __ eonw(as_Register($dst$$reg),
10471               as_Register($src1$$reg),
10472               zr,
10473               Assembler::LSL, 0);
10474   %}
10475 
10476   ins_pipe(ialu_reg);
10477 %}
10478 
// dst = src1 & ~src2 (int): and-with-complement matched to a single
// BICW (bit clear), avoiding a separate NOT.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (long): 64-bit BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10512 
// dst = src1 | ~src2 (int): or-with-complement matched to a single ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (long): 64-bit ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10546 
// dst = ~(src1 ^ src2) (int): the tree is written as -1 ^ (src2 ^ src1),
// the canonical form produced by the ideal-graph transforms; matched to
// a single EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (long): 64-bit EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10580 
// dst = src1 & ~(src2 >>> src3) (int): matched to a single BICW with an
// LSR-shifted register operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3) (long): BIC with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10616 
// dst = src1 & ~(src2 >> src3) (int): matched to a single BICW with an
// ASR-shifted register operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (long): BIC with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10652 
// dst = src1 & ~(src2 << src3) (int): matched to a single BICW with an
// LSL-shifted register operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (long): BIC with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10688 
// dst = src1 ^ ~(src2 >>> src3) (int): matched to a single EONW with an
// LSR-shifted register operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 >>> src3) (long): EON with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10724 
// dst = src1 ^ ~(src2 >> src3) (int): matched to a single EONW with an
// ASR-shifted register operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 >> src3) (long): EON with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10760 
// dst = src1 ^ ~(src2 << src3) (int): matched to a single EONW with an
// LSL-shifted register operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ ~(src2 << src3) (long): EON with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10796 
// dst = src1 | ~(src2 >>> src3) (int): matched to a single ORNW with an
// LSR-shifted register operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3) (long): ORN with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10832 
// dst = src1 | ~(src2 >> src3) (int): matched to a single ORNW with an
// ASR-shifted register operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (long): ORN with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10868 
// dst = src1 | ~(src2 << src3) (int): matched to a single ORNW with an
// LSL-shifted register operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (long): ORN with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10904 
// dst = src1 & (src2 >>> src3) (int): ANDW with an LSR-shifted register
// operand, folding the shift into the logical op.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3) (long): AND with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10942 
// dst = src1 & (src2 >> src3) (int): ANDW with an ASR-shifted register
// operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (long): AND with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10980 
// dst = src1 & (src2 << src3) (int): ANDW with an LSL-shifted register
// operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (long): AND with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11018 
// dst = src1 ^ (src2 >>> src3) (int): EORW with an LSR-shifted register
// operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3) (long): EOR with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11056 
// dst = src1 ^ (src2 >> src3) (int): EORW with an ASR-shifted register
// operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (long): EOR with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11094 
// dst = src1 ^ (src2 << src3) (int): EORW with an LSL-shifted register
// operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (long): EOR with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11132 
// dst = src1 | (src2 >>> src3) (int): ORRW with an LSR-shifted register
// operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3) (long): ORR with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11170 
// dst = src1 | (src2 >> src3) (int): ORRW with an ASR-shifted register
// operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (long): ORR with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11208 
// dst = src1 | (src2 << src3) (int): ORRW with an LSL-shifted register
// operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (long): ORR with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11246 
// dst = src1 + (src2 >>> src3) (int): ADDW with an LSR-shifted register
// operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3) (long): ADD with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11284 
// dst = src1 + (src2 >> src3) (int): ADDW with an ASR-shifted register
// operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (long): ADD with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11322 
// dst = src1 + (src2 << src3) (int): ADDW with an LSL-shifted register
// operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3) (long): ADD with LSL-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11360 
// dst = src1 - (src2 >>> src3) (int): SUBW with an LSR-shifted register
// operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3) (long): SUB with LSR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11398 
// dst = src1 - (src2 >> src3) (int): SUBW with an ASR-shifted register
// operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Fix: mask the shift count with 0x1f, not 0x3f. Java int
              // shifts are mod 32 and the 32-bit shifted-register encoding
              // only accepts amounts 0..31; 0x3f could emit an invalid
              // shift of 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (long): SUB with ASR-shifted operand.
// Long shifts are mod 64, so the 0x3f mask is correct here.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11436 
11437 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11438                          iRegIorL2I src1, iRegIorL2I src2,
11439                          immI src3, rFlagsReg cr) %{
11440   match(Set dst (SubI src1 (LShiftI src2 src3)));
11441 
11442   ins_cost(1.9 * INSN_COST);
11443   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11444 
11445   ins_encode %{
11446     __ subw(as_Register($dst$$reg),
11447               as_Register($src1$$reg),
11448               as_Register($src2$$reg),
11449               Assembler::LSL,
11450               $src3$$constant & 0x3f);
11451   %}
11452 
11453   ins_pipe(ialu_reg_reg_shift);
11454 %}
11455 
11456 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11457                          iRegL src1, iRegL src2,
11458                          immI src3, rFlagsReg cr) %{
11459   match(Set dst (SubL src1 (LShiftL src2 src3)));
11460 
11461   ins_cost(1.9 * INSN_COST);
11462   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11463 
11464   ins_encode %{
11465     __ sub(as_Register($dst$$reg),
11466               as_Register($src1$$reg),
11467               as_Register($src2$$reg),
11468               Assembler::LSL,
11469               $src3$$constant & 0x3f);
11470   %}
11471 
11472   ins_pipe(ialu_reg_reg_shift);
11473 %}
11474 
11475 
11476 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// The pair (src << lshift) >> rshift is folded into one signed
// bitfield move: immr = (rshift - lshift) & 63 rotates the field into
// position, imms = 63 - lshift selects its highest source bit.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;             // imms
    int r = (rshift - lshift) & 63;  // immr
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: signed bitfield move over W registers,
// with shift counts and rotate arithmetic taken mod 32.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;             // imms
    int r = (rshift - lshift) & 31;  // immr
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (src << lshift) >>> rshift folded
// into one unsigned bitfield move (zero- rather than sign-extends).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;             // imms
    int r = (rshift - lshift) & 63;  // immr
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;             // imms
    int r = (rshift - lshift) & 31;  // immr
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask
// (x >>> rshift) & mask is a single ubfx of `width` bits starting at
// bit `rshift`, where width = log2(mask + 1).  The *_bitmask operand
// classes constrain mask to a contiguous low-bit mask (2^k - 1), so
// exact_log2(mask+1) is well defined — see their declarations.

// 32-bit unsigned bitfield extract.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits extracted
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits extracted
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, which is exactly
// the ConvI2L semantics for a non-negative int value.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits extracted
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11618 
// Rotations
// (x << lshift) | (y >>> rshift) with lshift + rshift == word size is
// an EXTR (extract) of rshift bits; the predicates below check that
// the two shift constants sum to 0 mod 64 (or 32).  The Add variants
// are equivalent because the shifted fields do not overlap, so OR and
// ADD produce the same result.

// 64-bit rotate/extract via OrL.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit rotate/extract via OrI (emits extrw although the format
// string prints "extr").
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 64-bit rotate/extract via AddL (fields disjoint, so add == or).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit rotate/extract via AddI.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11680 
11681 
// rol expander
// Rotate-left by a variable count.  AArch64 has no rol instruction;
// rotate left by s is rotate right by -s, so we negate the count into
// rscratch1 and use rorv (which takes the count mod the register
// width — see the ARM ARM description of RORV).

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));  // rscratch1 = -shift
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant: negate the count, then rotate right with rorvw.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));  // rscratch1 = -shift
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the 64-bit rotate-left idiom (x << s) | (x >>> (64 - s)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s); equivalent because shift counts
// are taken mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11731 
// Matches the 32-bit rotate-left idiom (x << s) | (x >>> (32 - s)).
//
// FIX: this rule previously declared long register classes
// (iRegLNoSp/iRegL) and expanded the 64-bit rolL_rReg even though it
// matches a 32-bit OrI pattern, so it could never match correctly.
// Use int register classes and the 32-bit rolI_rReg expander
// (cf. upstream JDK-8157906).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11740 
// Same 32-bit rotate-left idiom written with (0 - s); equivalent
// because shift counts are taken mod 32.
//
// FIX: previously used long register classes and the 64-bit
// rolL_rReg expander for a 32-bit OrI pattern; corrected to int
// register classes and rolI_rReg (cf. upstream JDK-8157906).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11749 
// ror expander
// Rotate-right by a variable count maps directly onto rorv.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant using rorvw.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the 64-bit rotate-right idiom (x >>> s) | (x << (64 - s)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s); equivalent because shift counts
// are taken mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11797 
// Matches the 32-bit rotate-right idiom (x >>> s) | (x << (32 - s)).
//
// FIX: this rule previously declared long register classes
// (iRegLNoSp/iRegL) and expanded the 64-bit rorL_rReg even though it
// matches a 32-bit OrI pattern, so it could never match correctly.
// Use int register classes and the 32-bit rorI_rReg expander
// (cf. upstream JDK-8157906).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11806 
// Same 32-bit rotate-right idiom written with (0 - s); equivalent
// because shift counts are taken mod 32.
//
// FIX: previously used long register classes and the 64-bit
// rorL_rReg expander for a 32-bit OrI pattern; corrected to int
// register classes and rorI_rReg (cf. upstream JDK-8157906).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11815 
// Add/subtract (extended)
// A ConvI2L feeding an add/sub is folded into the extended-register
// operand form (sxtw: sign-extend the 32-bit source to 64 bits).

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract counterpart: src1 - (long)src2 using a sign-extended
// second operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11843 
11844 
// Add with a shift-pair sign/zero extension of the second operand:
// (x << k) >> k with matching immediate operand classes pins k so the
// pair is a sxth/sxtb/sxtw (or, with >>>, uxtb) extension, which the
// add's extended-register operand form performs for free.

// 32-bit add of a short-extended operand (k == 16).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit add of a byte-sign-extended operand (k == 24).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit add of a byte-zero-extended operand (URShift, k == 24).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a short-sign-extended operand (k == 48).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a word-sign-extended operand (k == 32).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a byte-sign-extended operand (k == 56).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a byte-zero-extended operand (URShift, k == 56).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11935 
11936 
// Add/subtract where the second operand is masked with 0xff, 0xffff
// or 0xffffffff: the And is folded into the zero-extending
// (uxtb/uxth/uxtw) extended-register operand form of add/sub.

// 32-bit add of (src2 & 0xff).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit add of (src2 & 0xffff).
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of (src2 & 0xffL).
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of (src2 & 0xffffL).
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of (src2 & 0xffffffffL).
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit subtract of (src2 & 0xff).
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit subtract of (src2 & 0xffff).
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit subtract of (src2 & 0xffL).
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit subtract of (src2 & 0xffffL).
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit subtract of (src2 & 0xffffffffL).
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12066 
12067 // END This section of the file is automatically generated. Do not edit --------------
12068 
12069 // ============================================================================
12070 // Floating Point Arithmetic Instructions
12071 
12072 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12073   match(Set dst (AddF src1 src2));
12074 
12075   ins_cost(INSN_COST * 5);
12076   format %{ "fadds   $dst, $src1, $src2" %}
12077 
12078   ins_encode %{
12079     __ fadds(as_FloatRegister($dst$$reg),
12080              as_FloatRegister($src1$$reg),
12081              as_FloatRegister($src2$$reg));
12082   %}
12083 
12084   ins_pipe(pipe_class_default);
12085 %}
12086 
12087 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12088   match(Set dst (AddD src1 src2));
12089 
12090   ins_cost(INSN_COST * 5);
12091   format %{ "faddd   $dst, $src1, $src2" %}
12092 
12093   ins_encode %{
12094     __ faddd(as_FloatRegister($dst$$reg),
12095              as_FloatRegister($src1$$reg),
12096              as_FloatRegister($src2$$reg));
12097   %}
12098 
12099   ins_pipe(pipe_class_default);
12100 %}
12101 
12102 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12103   match(Set dst (SubF src1 src2));
12104 
12105   ins_cost(INSN_COST * 5);
12106   format %{ "fsubs   $dst, $src1, $src2" %}
12107 
12108   ins_encode %{
12109     __ fsubs(as_FloatRegister($dst$$reg),
12110              as_FloatRegister($src1$$reg),
12111              as_FloatRegister($src2$$reg));
12112   %}
12113 
12114   ins_pipe(pipe_class_default);
12115 %}
12116 
12117 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12118   match(Set dst (SubD src1 src2));
12119 
12120   ins_cost(INSN_COST * 5);
12121   format %{ "fsubd   $dst, $src1, $src2" %}
12122 
12123   ins_encode %{
12124     __ fsubd(as_FloatRegister($dst$$reg),
12125              as_FloatRegister($src1$$reg),
12126              as_FloatRegister($src2$$reg));
12127   %}
12128 
12129   ins_pipe(pipe_class_default);
12130 %}
12131 
12132 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12133   match(Set dst (MulF src1 src2));
12134 
12135   ins_cost(INSN_COST * 6);
12136   format %{ "fmuls   $dst, $src1, $src2" %}
12137 
12138   ins_encode %{
12139     __ fmuls(as_FloatRegister($dst$$reg),
12140              as_FloatRegister($src1$$reg),
12141              as_FloatRegister($src2$$reg));
12142   %}
12143 
12144   ins_pipe(pipe_class_default);
12145 %}
12146 
12147 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12148   match(Set dst (MulD src1 src2));
12149 
12150   ins_cost(INSN_COST * 6);
12151   format %{ "fmuld   $dst, $src1, $src2" %}
12152 
12153   ins_encode %{
12154     __ fmuld(as_FloatRegister($dst$$reg),
12155              as_FloatRegister($src1$$reg),
12156              as_FloatRegister($src2$$reg));
12157   %}
12158 
12159   ins_pipe(pipe_class_default);
12160 %}
12161 
// We cannot use these fused mul with add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12167 
12168 
12169 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12170 //   match(Set dst (AddF (MulF src1 src2) src3));
12171 
12172 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12173 
12174 //   ins_encode %{
12175 //     __ fmadds(as_FloatRegister($dst$$reg),
12176 //              as_FloatRegister($src1$$reg),
12177 //              as_FloatRegister($src2$$reg),
12178 //              as_FloatRegister($src3$$reg));
12179 //   %}
12180 
12181 //   ins_pipe(pipe_class_default);
12182 // %}
12183 
12184 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12185 //   match(Set dst (AddD (MulD src1 src2) src3));
12186 
12187 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12188 
12189 //   ins_encode %{
12190 //     __ fmaddd(as_FloatRegister($dst$$reg),
12191 //              as_FloatRegister($src1$$reg),
12192 //              as_FloatRegister($src2$$reg),
12193 //              as_FloatRegister($src3$$reg));
12194 //   %}
12195 
12196 //   ins_pipe(pipe_class_default);
12197 // %}
12198 
12199 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12200 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12201 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12202 
12203 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12204 
12205 //   ins_encode %{
12206 //     __ fmsubs(as_FloatRegister($dst$$reg),
12207 //               as_FloatRegister($src1$$reg),
12208 //               as_FloatRegister($src2$$reg),
12209 //              as_FloatRegister($src3$$reg));
12210 //   %}
12211 
12212 //   ins_pipe(pipe_class_default);
12213 // %}
12214 
12215 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12216 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12217 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12218 
12219 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12220 
12221 //   ins_encode %{
12222 //     __ fmsubd(as_FloatRegister($dst$$reg),
12223 //               as_FloatRegister($src1$$reg),
12224 //               as_FloatRegister($src2$$reg),
12225 //               as_FloatRegister($src3$$reg));
12226 //   %}
12227 
12228 //   ins_pipe(pipe_class_default);
12229 // %}
12230 
12231 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12232 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12233 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12234 
12235 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12236 
12237 //   ins_encode %{
12238 //     __ fnmadds(as_FloatRegister($dst$$reg),
12239 //                as_FloatRegister($src1$$reg),
12240 //                as_FloatRegister($src2$$reg),
12241 //                as_FloatRegister($src3$$reg));
12242 //   %}
12243 
12244 //   ins_pipe(pipe_class_default);
12245 // %}
12246 
12247 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12248 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12249 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12250 
12251 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12252 
12253 //   ins_encode %{
12254 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12255 //                as_FloatRegister($src1$$reg),
12256 //                as_FloatRegister($src2$$reg),
12257 //                as_FloatRegister($src3$$reg));
12258 //   %}
12259 
12260 //   ins_pipe(pipe_class_default);
12261 // %}
12262 
12263 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12264 //   match(Set dst (SubF (MulF src1 src2) src3));
12265 
12266 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12267 
12268 //   ins_encode %{
12269 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12270 //                as_FloatRegister($src1$$reg),
12271 //                as_FloatRegister($src2$$reg),
12272 //                as_FloatRegister($src3$$reg));
12273 //   %}
12274 
12275 //   ins_pipe(pipe_class_default);
12276 // %}
12277 
12278 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12279 //   match(Set dst (SubD (MulD src1 src2) src3));
12280 
12281 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12282 
12283 //   ins_encode %{
12284 //   // n.b. insn name should be fnmsubd
12285 //     __ fnmsub(as_FloatRegister($dst$$reg),
12286 //                as_FloatRegister($src1$$reg),
12287 //                as_FloatRegister($src2$$reg),
12288 //                as_FloatRegister($src3$$reg));
12289 //   %}
12290 
12291 //   ins_pipe(pipe_class_default);
12292 // %}
12293 
12294 
// Single-precision divide.  Much higher cost than add/mul reflects
// the latency of hardware FP division.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision divide.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12324 
12325 instruct negF_reg_reg(vRegF dst, vRegF src) %{
12326   match(Set dst (NegF src));
12327 
12328   ins_cost(INSN_COST * 3);
12329   format %{ "fneg   $dst, $src" %}
12330 
12331   ins_encode %{
12332     __ fnegs(as_FloatRegister($dst$$reg),
12333              as_FloatRegister($src$$reg));
12334   %}
12335 
12336   ins_pipe(pipe_class_default);
12337 %}
12338 
12339 instruct negD_reg_reg(vRegD dst, vRegD src) %{
12340   match(Set dst (NegD src));
12341 
12342   ins_cost(INSN_COST * 3);
12343   format %{ "fnegd   $dst, $src" %}
12344 
12345   ins_encode %{
12346     __ fnegd(as_FloatRegister($dst$$reg),
12347              as_FloatRegister($src$$reg));
12348   %}
12349 
12350   ins_pipe(pipe_class_default);
12351 %}
12352 
12353 instruct absF_reg(vRegF dst, vRegF src) %{
12354   match(Set dst (AbsF src));
12355 
12356   ins_cost(INSN_COST * 3);
12357   format %{ "fabss   $dst, $src" %}
12358   ins_encode %{
12359     __ fabss(as_FloatRegister($dst$$reg),
12360              as_FloatRegister($src$$reg));
12361   %}
12362 
12363   ins_pipe(pipe_class_default);
12364 %}
12365 
12366 instruct absD_reg(vRegD dst, vRegD src) %{
12367   match(Set dst (AbsD src));
12368 
12369   ins_cost(INSN_COST * 3);
12370   format %{ "fabsd   $dst, $src" %}
12371   ins_encode %{
12372     __ fabsd(as_FloatRegister($dst$$reg),
12373              as_FloatRegister($src$$reg));
12374   %}
12375 
12376   ins_pipe(pipe_class_default);
12377 %}
12378 
12379 instruct sqrtD_reg(vRegD dst, vRegD src) %{
12380   match(Set dst (SqrtD src));
12381 
12382   ins_cost(INSN_COST * 50);
12383   format %{ "fsqrtd  $dst, $src" %}
12384   ins_encode %{
12385     __ fsqrtd(as_FloatRegister($dst$$reg),
12386              as_FloatRegister($src$$reg));
12387   %}
12388 
12389   ins_pipe(pipe_class_default);
12390 %}
12391 
12392 instruct sqrtF_reg(vRegF dst, vRegF src) %{
12393   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
12394 
12395   ins_cost(INSN_COST * 50);
12396   format %{ "fsqrts  $dst, $src" %}
12397   ins_encode %{
12398     __ fsqrts(as_FloatRegister($dst$$reg),
12399              as_FloatRegister($src$$reg));
12400   %}
12401 
12402   ins_pipe(pipe_class_default);
12403 %}
12404 
12405 // ============================================================================
12406 // Logical Instructions
12407 
12408 // Integer Logical Instructions
12409 
12410 // And Instructions
12411 
12412 
// Integer (32-bit) AND, register-register: emits andw dst, src1, src2.
// n.b. rFlagsReg cr appears in the operand list but no effect() is
// declared for it here.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12427 
// Integer (32-bit) AND with a logical immediate: emits andw dst, src1, #imm.
// The format string previously read "andsw", but the encoding emits the
// plain (non-flag-setting) andw — fixed so disassembly output matches
// the generated instruction.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12442 
12443 // Or Instructions
12444 
// Integer (32-bit) OR, register-register: emits orrw dst, src1, src2.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer (32-bit) OR with a logical immediate: emits orrw dst, src1, #imm.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Integer (32-bit) XOR, register-register: emits eorw dst, src1, src2.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer (32-bit) XOR with a logical immediate: emits eorw dst, src1, #imm.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12506 
12507 // Long Logical Instructions
12508 // TODO
12509 
// Long (64-bit) AND, register-register: emits and dst, src1, src2.
// Format annotations below were "# int" — corrected to "# long" since
// these are the 64-bit forms (cf. the "# int" annotations on the
// 32-bit andI/orI/xorI rules above).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long (64-bit) AND with a logical immediate: emits and dst, src1, #imm.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// Long (64-bit) OR, register-register: emits orr dst, src1, src2.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long (64-bit) OR with a logical immediate: emits orr dst, src1, #imm.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Long (64-bit) XOR, register-register: emits eor dst, src1, src2.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long (64-bit) XOR with a logical immediate: emits eor dst, src1, #imm.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12603 
// int -> long sign extension: sbfm with imms/immr 0..31 (i.e. sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (ConvI2L src) & 0xFFFFFFFF collapses to a
// single zero-extending ubfm (uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move (movw) discards the
// upper 32 bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// int -> boolean: dst = (src != 0) ? 1 : 0, via cmpw + cset.
// Clobbers the flags register.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0, via 64-bit cmp + cset.
// Clobbers the flags register.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// double -> float narrowing conversion (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// float -> double widening conversion (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// float -> int: signed convert with round-toward-zero (fcvtzsw).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// float -> long: signed convert with round-toward-zero (fcvtzs).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// int -> float: signed scalar convert (scvtfws).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// long -> float: signed scalar convert (scvtfs).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double -> int: signed convert with round-toward-zero (fcvtzdw).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double -> long: signed convert with round-toward-zero (fcvtzd).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// int -> double: signed scalar convert (scvtfwd).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// long -> double: signed scalar convert (scvtfd).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12808 
12809 // stack <-> reg and reg <-> reg shuffles with no conversion
12810 
// Raw float bits from a stack slot into an int register (32-bit load).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw int bits from a stack slot into a float register (32-bit FP load).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw double bits from a stack slot into a long register (64-bit load).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw long bits from a stack slot into a double register (64-bit FP load).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw float bits from a float register to an int stack slot (FP store).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw int bits from an int register to a float stack slot (32-bit store).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12918 
// Raw double bits from a double register to a long stack slot (FP store).
// Format string previously listed the operands as "$dst, $src"; the
// encoding stores src into the dst stack slot, so the operand order is
// corrected to "$src, $dst" to match the emitted strd and the sibling
// MoveF2I_reg_stack / MoveI2F_reg_stack formats.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12936 
// Raw long bits from a long register to a double stack slot (64-bit store).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Raw float bits: FP register -> int register via fmovs (no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw int bits: int register -> FP register via fmovs (no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw double bits: FP register -> long register via fmovd (no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw long bits: long register -> FP register via fmovd (no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
13026 
13027 // ============================================================================
13028 // clearing of an array
13029 
// Zero an array: count in r11, base address in r10; both are
// clobbered (USE_KILL).  The actual loop is in the
// aarch64_enc_clear_array_reg_reg encoding class (defined elsewhere
// in this file).
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
13042 
13043 // ============================================================================
13044 // Overflow Math Instructions
13045 
// Int add overflow check: cmnw (adds to zr) sets flags; the consumer
// tests the V flag via BoolTest::overflow/no_overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add overflow check, immediate form.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add overflow check: 64-bit cmn sets flags for the V-flag test.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check, immediate form.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int subtract overflow check: cmpw sets flags for the V-flag test.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check, immediate form.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check: 64-bit cmp sets flags.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check, immediate form.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int negate overflow check: 0 - op1, via cmpw zr, op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check: 0 - op1, via cmp zr, op1.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13175 
// Int multiply overflow check (generic flag-producing form).  smull
// computes the full 64-bit product; comparing it against its own
// sign-extended low word yields NE on overflow.  The csel/cmpw tail
// then translates that NE into the V flag, so any cmpOp consumer that
// tests overflow/no_overflow works.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Int multiply overflow check fused with a branch: when the flags are
// only consumed by an If testing overflow/no_overflow, skip the
// V-flag materialization above and branch directly on NE/EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check: mul gives the low 64 bits, smulh the
// high 64; the product overflows iff the high word is not the sign
// extension of the low word.  The csel/cmpw tail converts the NE
// result into the V flag as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Long multiply overflow check fused with a branch (see the int
// branch form above for the NE/EQ mapping).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13265 
13266 // ============================================================================
13267 // Compare Instructions
13268 
// Signed int compare, register-register (encoding class aarch64_enc_cmpw).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate (costs more: the
// constant may need to be materialized first).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13324 
13325 // Unsigned compare Instructions; really, same as signed compare
13326 // except it should only be used to feed an If or a CMovI which takes a
13327 // cmpOpU.
13328 
// Unsigned int compare, register-register.  Same cmpw encoding as the
// signed form; the rFlagsRegU result restricts consumers to cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13384 
// Signed long compare, register-register (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (costs more: the
// constant may need to be materialized first).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13440 
// Pointer compare, register-register (unsigned flags).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare op1 against zero.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test: compare op1 against zero.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13496 
13497 // FP comparisons
13498 //
13499 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13500 // using normal cmpOp. See declaration of rFlagsReg for details.
13501 
// Single-precision FP compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Single-precision FP compare against 0.0 (AArch64 fcmp has a dedicated
// #0.0 immediate form — the only FP immediate fcmp supports).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13529 // FROM HERE
13530 
// Double-precision FP compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision FP compare against 0.0 (dedicated #0.0 fcmp form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13558 
// Three-way float compare (CmpF3): $dst <- -1 if less or unordered,
// 0 if equal, +1 if greater — via csinv (0/-1 on EQ) then csneg on LT.
// Clobbers the flags register.
// Fixes vs. previous revision: balanced the parenthesis in the debug
// format string and removed the unused `Label done` / `bind(done)` pair
// (the label was never branched to, so binding it was dead code).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13586 
// Three-way double compare (CmpD3): $dst <- -1 if less or unordered,
// 0 if equal, +1 if greater.  Clobbers flags.
// Fixes vs. previous revision: balanced the parenthesis in the debug
// format string and removed the unused `Label done` / `bind(done)` pair.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13613 
// Three-way float compare against 0.0 (uses the fcmp #0.0 immediate form):
// $dst <- -1 if less or unordered, 0 if equal, +1 if greater.  Clobbers flags.
// Fixes vs. previous revision: balanced the parenthesis in the debug
// format string and removed the unused `Label done` / `bind(done)` pair.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13640 
// Three-way double compare against 0.0 (fcmp #0.0 immediate form):
// $dst <- -1 if less or unordered, 0 if equal, +1 if greater.  Clobbers flags.
// Fixes vs. previous revision: balanced the parenthesis in the debug
// format string and removed the unused `Label done` / `bind(done)` pair.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13666 
// CmpLTMask: $dst <- all-ones (-1) if p < q (signed), else 0.
// Implemented as cset (0/1 on LT) followed by negation; clobbers flags.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // negate: 1 -> -1 (all ones), 0 -> 0
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask vs zero: a single arithmetic right shift by 31 smears the
// sign bit, yielding -1 for negative src and 0 otherwise.
// NOTE(review): cr is listed as KILL but no flag-setting instruction is
// emitted here — likely kept for match-rule symmetry; confirm.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13703 
13704 // ============================================================================
13705 // Max and Min
13706 
// Signed int minimum: compare then conditional-select src1 on LT.
// size(8) = two 4-byte instructions.
// NOTE(review): the format string is slightly malformed (missing comma /
// '#' marker) — debug output only, left as-is.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: same shape as minI_rReg but selects on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13757 
13758 // ============================================================================
13759 // Branch Instructions
13760 
13761 // Direct Branch.
// Unconditional direct branch (ideal Goto node).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
13775 
13776 // Conditional Near Branch
// Conditional Near Branch — signed condition codes from the normal flags reg.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned — uses the unsigned flags register and
// the unsigned condition-code operand (cmpOpU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13817 
13818 // Make use of CBZ and CBNZ.  These instructions, as well as being
13819 // shorter than (cmp; branch), have the additional benefit of not
13820 // killing the flags.
13821 
// Compare-int-with-zero-and-branch fused into cbzw/cbnzw; the predicate
// restricts matching to eq/ne tests, the only ones cbz/cbnz can express.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long flavour of the fused zero-compare-and-branch (cbz/cbnz, 64-bit).
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer flavour: fused null-check-and-branch via cbz/cbnz.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13878 
13879 // Conditional Far Branch
13880 // Conditional Far Branch Unsigned
13881 // TODO: fixme
13882 
13883 // counted loop end branch near
// counted loop end branch near — signed condition.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned — unsigned flags/condition.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13916 
13917 // counted loop end branch far
13918 // counted loop end branch far unsigned
13919 // TODO: fixme
13920 
13921 // ============================================================================
13922 // inlined locking and unlocking
13923 
// Inlined monitor fast-path lock: result is communicated through the
// flags register; tmp/tmp2 are scratch (TEMP) and clobbered.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor fast-path unlock; mirrors cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13951 
13952 
13953 // ============================================================================
13954 // Safepoint Instructions
13955 
13956 // TODO
13957 // provide a near and far version of this code
13958 
// Safepoint poll: load from the polling page; a protected page faults and
// traps the thread into the VM for the safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
13971 
13972 
13973 // ============================================================================
13974 // Procedure Call/Return Instructions
13975 
13976 // Call Java Static Instruction
13977 
// Call Java Static Instruction: direct call to a resolved Java method,
// followed by the standard call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction: virtual/interface dispatch (inline-cache
// style call), plus the call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction: call out of compiled code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction — leaf variant (no safepoint/stack walk needed;
// same encoding as CallRuntimeDirect here).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction — leaf, no-FP variant.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14063 
14064 // Tail Call; Jump from runtime stub to Java code.
14065 // Also known as an 'interprocedural jump'.
14066 // Target of jump will eventually return to caller.
14067 // TailJump below removes the return address.
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump carrying an exception oop (in r0 per iRegP_R0) to the target.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14093 
14094 // Create exception oop: created by stack-crawling runtime code.
14095 // Created exception is now available to this handler, and is setup
14096 // just prior to jumping to this handler. No code emitted.
14097 // TODO check
14098 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}


// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now: emitted for ideal Halt; brk(999) raises a debug breakpoint trap.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14154 
14155 // ============================================================================
14156 // Partial Subtype Check
14157 //
14158 // superklass array for an instance of the superklass.  Set a hidden
14159 // internal cache on a hit (cache is checked with exposed code in
14160 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14161 // encoding ALSO sets flags.
14162 
// Partial subtype check producing a result register; the opcode(0x1)
// selector tells the shared encoding to zero the result on a hit
// (see the comment on the line itself).  Fixed registers per operand types.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when the check's result is only compared against zero:
// the flags carry the outcome, so the result reg need not be zeroed
// (opcode 0x0) and is simply killed.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14192 
// String.compareTo intrinsic; delegates to MacroAssembler::string_compare.
// Fixed-register operands match the stub's calling convention.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.indexOf intrinsic, variable needle length (icnt2 == -1 signals
// "length in cnt2 register" to MacroAssembler::string_indexof).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.indexOf intrinsic specialized for a small constant needle length
// (immI_le_4): the length is passed as a compile-time constant and the
// cnt2 register argument is replaced with zr.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.equals intrinsic; delegates to MacroAssembler::string_equals.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// char[] equality intrinsic (ideal AryEq); uses char_arrays_equals helper.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// encode char[] to byte[] in ISO_8859_1
// Clobbers four vector temporaries (V0-V3) used by the SIMD encode loop.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14293 
14294 // ============================================================================
14295 // This name is KNOWN by the ADLC and cannot be changed.
14296 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14297 // for this guy.
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// Zero-size instruction: the thread pointer already lives in the
// dedicated thread register (thread_RegP), so no code is emitted.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14312 
14313 // ====================VECTOR INSTRUCTIONS=====================================
14314 
14315 // Load vector (32 bits)
// Load vector (32 bits) — predicate dispatches on the LoadVector's
// memory footprint; 4 bytes maps to a scalar ldrs into a D register.
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load vector (64 bits) into a D register.
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Vector (128 bits) into a Q register.
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (32 bits).
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (64 bits).
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (128 bits).
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
14380 
14381 instruct replicate8B(vecD dst, iRegIorL2I src)
14382 %{
14383   predicate(n->as_Vector()->length() == 4 ||
14384             n->as_Vector()->length() == 8);
14385   match(Set dst (ReplicateB src));
14386   ins_cost(INSN_COST);
14387   format %{ "dup  $dst, $src\t# vector (8B)" %}
14388   ins_encode %{
14389     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
14390   %}
14391   ins_pipe(pipe_class_default);
14392 %}
14393 
14394 instruct replicate16B(vecX dst, iRegIorL2I src)
14395 %{
14396   predicate(n->as_Vector()->length() == 16);
14397   match(Set dst (ReplicateB src));
14398   ins_cost(INSN_COST);
14399   format %{ "dup  $dst, $src\t# vector (16B)" %}
14400   ins_encode %{
14401     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
14402   %}
14403   ins_pipe(pipe_class_default);
14404 %}
14405 
14406 instruct replicate8B_imm(vecD dst, immI con)
14407 %{
14408   predicate(n->as_Vector()->length() == 4 ||
14409             n->as_Vector()->length() == 8);
14410   match(Set dst (ReplicateB con));
14411   ins_cost(INSN_COST);
14412   format %{ "movi  $dst, $con\t# vector(8B)" %}
14413   ins_encode %{
14414     __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
14415   %}
14416   ins_pipe(pipe_class_default);
14417 %}
14418 
14419 instruct replicate16B_imm(vecX dst, immI con)
14420 %{
14421   predicate(n->as_Vector()->length() == 16);
14422   match(Set dst (ReplicateB con));
14423   ins_cost(INSN_COST);
14424   format %{ "movi  $dst, $con\t# vector(16B)" %}
14425   ins_encode %{
14426     __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
14427   %}
14428   ins_pipe(pipe_class_default);
14429 %}
14430 
14431 instruct replicate4S(vecD dst, iRegIorL2I src)
14432 %{
14433   predicate(n->as_Vector()->length() == 2 ||
14434             n->as_Vector()->length() == 4);
14435   match(Set dst (ReplicateS src));
14436   ins_cost(INSN_COST);
14437   format %{ "dup  $dst, $src\t# vector (4S)" %}
14438   ins_encode %{
14439     __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
14440   %}
14441   ins_pipe(pipe_class_default);
14442 %}
14443 
// Replicate a scalar short into all 8 halfword lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  // (8H), not (8S): the arrangement used is T8H, matching the
  // "vector(8H)" comment printed by replicate8S_imm.
  format %{ "dup  $dst, $src\t# vector (8H)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14455 
// Replicate an immediate short into the halfword lanes of a 64-bit vector.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Only the low 16 bits of the constant are meaningful for a halfword lane.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate an immediate short into all 8 halfword lanes of a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate a scalar int into both word lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate a scalar int into all 4 word lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate an immediate int into both word lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate an immediate int into all 4 word lanes of a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate a scalar long into both doubleword lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14540 
// Zero a 128-bit vector register.
// NOTE(review): this matches (ReplicateI zero), not ReplicateL, with a
// length-2 predicate — presumably relying on a zero vector being
// bit-identical regardless of element type; confirm against the matcher.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // Clears dst by eor'ing it with itself (no constant load needed;
    // the stale contents of dst are irrelevant), although the format
    // string above prints "movi".
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14554 
// Replicate a scalar float into both word lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate a scalar float into all 4 word lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Replicate a scalar double into both doubleword lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14593 
14594 // ====================REDUCTION ARITHMETIC====================================
14595 
// Add-reduce a 2-int vector: dst = src1 + src2[0] + src2[1].
// Each lane is moved to a general register (umov) and combined with
// scalar addw instructions.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-int vector: addv sums all four lanes into tmp[0],
// which is then moved to a general register and added to src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14632 
// Multiply-reduce a 2-int vector: dst = src1 * src2[0] * src2[1].
// Each lane is moved to a general register (umov) for scalar mul.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 4-int vector.  The ins copies the high doubleword
// of src2 (lanes 2,3) into the low half of tmp; mulv then computes the
// pairwise products {src2[0]*src2[2], src2[1]*src2[3]} in one T2S
// multiply, and the two partial products are combined with scalar muls.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14676 
// Add-reduce a 2-float vector: dst = (src1 + src2[0]) + src2[1].
// The ins moves lane 1 of src2 into tmp[0] so scalar fadds can reach it;
// lanes are accumulated sequentially (strict FP evaluation order).
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-float vector: lanes 1..3 are each moved into tmp[0]
// with ins and accumulated one at a time with scalar fadds, preserving
// a fixed left-to-right summation order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14728 
// Multiply-reduce a 2-float vector: dst = (src1 * src2[0]) * src2[1].
// The ins moves lane 1 of src2 into tmp[0] so scalar fmuls can reach it.
// Format trailer corrected from "add reduction4f" to "mul reduction2f":
// this is a 2-lane multiply reduction.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14748 
// Multiply-reduce a 4-float vector: lanes 1..3 are each moved into
// tmp[0] with ins and multiplied in one at a time with scalar fmuls,
// preserving a fixed left-to-right evaluation order.
// Format trailer corrected from "add reduction4f" to "mul reduction4f":
// this is a multiply reduction, not an add.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14780 
// Add-reduce a 2-double vector: dst = (src1 + src2[0]) + src2[1].
// The ins moves lane 1 of src2 into tmp[0] so scalar faddd can reach it.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14800 
// Multiply-reduce a 2-double vector: dst = (src1 * src2[0]) * src2[1].
// The ins moves lane 1 of src2 into tmp[0] so scalar fmuld can reach it.
// Format trailer corrected from "add reduction2d" to "mul reduction2d":
// this is a multiply reduction, not an add.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14820 
14821 // ====================VECTOR ARITHMETIC=======================================
14822 
14823 // --------------------------------- ADD --------------------------------------
14824 
// Vector integer add, byte lanes (8B).  The predicate also accepts
// 4-byte vectors, which occupy the low half of a vecD.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer add, byte lanes (16B).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer add, halfword lanes (4H); also covers 2-short vectors.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer add, halfword lanes (8H).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer add, word lanes (2S).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer add, word lanes (4S).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector long add, doubleword lanes (2D).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float add (2S).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float add (4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14952 
// Vector double add (2D).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Length predicate added for consistency with the other 2D rules
  // (vsub2D, vmul2D, vdiv2D), which all guard on length() == 2.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14965 
14966 // --------------------------------- SUB --------------------------------------
14967 
// Vector integer subtract, byte lanes (8B); also covers 4-byte vectors.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer subtract, byte lanes (16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer subtract, halfword lanes (4H); also covers 2-short vectors.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer subtract, halfword lanes (8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer subtract, word lanes (2S).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer subtract, word lanes (4S).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector long subtract, doubleword lanes (2L).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float subtract (2S).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float subtract (4S).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector double subtract (2D).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15109 
15110 // --------------------------------- MUL --------------------------------------
15111 
// Vector integer multiply, halfword lanes (4H); also covers 2-short vectors.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer multiply, halfword lanes (8H).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer multiply, word lanes (2S).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector integer multiply, word lanes (4S).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float multiply (2S).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float multiply (4S).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector double multiply (2D).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15210 
15211 // --------------------------------- DIV --------------------------------------
15212 
// Vector float divide (2S).  Only FP division is vectorized; there is
// no integer vector divide on AArch64 SIMD.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float divide (4S).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector double divide (2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15254 
15255 // --------------------------------- AND --------------------------------------
15256 
// Vector bitwise AND (8B).  Logical ops are element-size agnostic, so
// the predicates test total byte length rather than element count.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector bitwise AND (16B).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15285 
15286 // --------------------------------- OR ---------------------------------------
15287 
// Vector bitwise OR (8B).  Logical ops are element-size agnostic, so
// the predicate tests total byte length rather than element count.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Format corrected from "and" to "orr": this rule emits orr (see
  // ins_encode below and the matching vor16B rule).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15302 
// Vector bitwise OR (16B).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15316 
15317 // --------------------------------- XOR --------------------------------------
15318 
// Bitwise XOR of two vectors totalling 4 or 8 bytes (D register).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  // Print the actual AArch64 mnemonic (EOR, emitted below), consistent with
  // the vand/vor rules which print their real mnemonics.
  format %{ "eor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15333 
// Bitwise XOR of two 128-bit vectors (16 byte lanes).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  // Print the actual AArch64 mnemonic (EOR, emitted below), consistent with
  // the vand/vor rules which print their real mnemonics.
  format %{ "eor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15347 
15348 // ------------------------------ Shift ---------------------------------------
15349 
// Materialize a left-shift count: broadcast the GP register count into
// every byte lane of a vector register (consumed by the vsll* rules).
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15358 
// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a right-shift count: broadcast the count into every byte lane,
// then negate the lanes so SSHL/USHL perform a right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15369 
// Variable shift of 4/8 byte lanes. SSHL handles both directions: right
// shifts arrive as negated counts (see vshiftcntR above).
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15384 
// Variable shift of 16 byte lanes (left, or arithmetic right via negated count).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15398 
// Variable logical right shift of 4/8 byte lanes (USHL with negated count).
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15412 
// Variable logical right shift of 16 byte lanes (USHL with negated count).
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15425 
// Immediate left shift of 4/8 byte lanes.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 8) {
      // Shifting by >= lane width (8 bits) zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15445 
// Immediate left shift of 16 byte lanes.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 8) {
      // Shifting by >= lane width (8 bits) zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15464 
// Immediate arithmetic right shift of 4/8 byte lanes.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 8) sh = 7;  // arithmetic shift saturates at lane width - 1
    // NOTE(review): the count is negated and masked before the call -- the
    // sshr() assembler helper apparently expects this encoded form; confirm
    // against the Assembler::sshr definition.
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15480 
// Immediate arithmetic right shift of 16 byte lanes.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 8) sh = 7;  // arithmetic shift saturates at lane width - 1
    // NOTE(review): negated/masked count -- see vsra8B_imm; confirm against
    // the Assembler::sshr definition.
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15495 
// Immediate logical right shift of 4/8 byte lanes.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 8) {
      // Logical shift by >= lane width zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // NOTE(review): count passed as -sh & 7 -- the ushr() helper appears
      // to expect the negated/encoded amount; confirm against the assembler.
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15515 
// Immediate logical right shift of 16 byte lanes.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 8) {
      // Logical shift by >= lane width zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // NOTE(review): negated/masked count -- see vsrl8B_imm.
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15534 
// Variable shift of 2/4 short (16-bit) lanes; SSHL handles left and
// arithmetic right (negated count) shifts.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15549 
// Variable shift of 8 short lanes (left, or arithmetic right via negated count).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15563 
// Variable logical right shift of 2/4 short lanes (USHL with negated count).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15577 
// Variable logical right shift of 8 short lanes (USHL with negated count).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15590 
// Immediate left shift of 2/4 short lanes.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 16) {
      // Shifting by >= lane width (16 bits) zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15610 
// Immediate left shift of 8 short lanes.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 16) {
      // Shifting by >= lane width (16 bits) zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15629 
// Immediate arithmetic right shift of 2/4 short lanes.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 16) sh = 15;  // arithmetic shift saturates at lane width - 1
    // NOTE(review): negated/masked count -- see vsra8B_imm; confirm against
    // the Assembler::sshr definition.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15645 
// Immediate arithmetic right shift of 8 short lanes.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 16) sh = 15;  // arithmetic shift saturates at lane width - 1
    // NOTE(review): negated/masked count -- see vsra8B_imm.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15660 
// Immediate logical right shift of 2/4 short lanes.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 16) {
      // Logical shift by >= lane width zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // NOTE(review): negated/masked count -- see vsrl8B_imm.
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15680 
// Immediate logical right shift of 8 short lanes.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int shifts are masked to 5 bits
    if (sh >= 16) {
      // Logical shift by >= lane width zeroes all lanes: dst = src ^ src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // NOTE(review): negated/masked count -- see vsrl8B_imm.
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15699 
// Variable shift of 2 int (32-bit) lanes; SSHL handles left and arithmetic
// right (negated count) shifts.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15713 
// Variable shift of 4 int lanes (left, or arithmetic right via negated count).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15727 
// Variable logical right shift of 2 int lanes (USHL with negated count).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15740 
// Variable logical right shift of 4 int lanes (USHL with negated count).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15753 
// Immediate left shift of 2 int lanes. The 5-bit mask matches Java int
// shift semantics; no clamp needed since the lane is 32 bits wide.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15766 
// Immediate left shift of 4 int lanes (Java-style 5-bit count mask).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15779 
// Immediate arithmetic right shift of 2 int lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // NOTE(review): count passed negated-and-masked, matching the encoded
    // form the sshr() helper expects (same convention as the *_imm byte and
    // short rules above) -- confirm against the assembler.
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15792 
// Immediate arithmetic right shift of 4 int lanes.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // NOTE(review): negated/masked count -- see vsra2I_imm.
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15805 
// Immediate logical right shift of 2 int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // NOTE(review): negated/masked count -- see vsra2I_imm.
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15818 
// Immediate logical right shift of 4 int lanes.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // NOTE(review): negated/masked count -- see vsra2I_imm.
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15831 
// Variable shift of 2 long (64-bit) lanes; SSHL handles left and arithmetic
// right (negated count) shifts.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15845 
// Variable logical right shift of 2 long lanes (USHL with negated count).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15858 
// Immediate left shift of 2 long lanes (Java-style 6-bit count mask).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15871 
// Immediate arithmetic right shift of 2 long lanes.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // NOTE(review): count passed negated-and-masked (6-bit, long lanes),
    // matching the encoded form the sshr() helper expects -- confirm
    // against the assembler.
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15884 
// Immediate logical right shift of 2 long lanes.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // NOTE(review): negated/masked count -- see vsra2L_imm.
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15897 
15898 //----------PEEPHOLE RULES-----------------------------------------------------
15899 // These must follow all instruction definitions as they use the names
15900 // defined in the instructions definitions.
15901 //
15902 // peepmatch ( root_instr_name [preceding_instruction]* );
15903 //
15904 // peepconstraint %{
15905 // (instruction_number.operand_name relational_op instruction_number.operand_name
15906 //  [, ...] );
15907 // // instruction numbers are zero-based using left to right order in peepmatch
15908 //
15909 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15910 // // provide an instruction_number.operand_name for each operand that appears
15911 // // in the replacement instruction's match rule
15912 //
15913 // ---------VM FLAGS---------------------------------------------------------
15914 //
15915 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15916 //
15917 // Each peephole rule is given an identifying number starting with zero and
15918 // increasing by one in the order seen by the parser.  An individual peephole
15919 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15920 // on the command-line.
15921 //
15922 // ---------CURRENT LIMITATIONS----------------------------------------------
15923 //
15924 // Only match adjacent instructions in same basic block
15925 // Only equality constraints
15926 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15927 // Only one replacement instruction
15928 //
15929 // ---------EXAMPLE----------------------------------------------------------
15930 //
15931 // // pertinent parts of existing instructions in architecture description
15932 // instruct movI(iRegINoSp dst, iRegI src)
15933 // %{
15934 //   match(Set dst (CopyI src));
15935 // %}
15936 //
15937 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
15938 // %{
15939 //   match(Set dst (AddI dst src));
15940 //   effect(KILL cr);
15941 // %}
15942 //
15943 // // Change (inc mov) to lea
15944 // peephole %{
//   // increment preceded by register-register move
15946 //   peepmatch ( incI_iReg movI );
15947 //   // require that the destination register of the increment
15948 //   // match the destination register of the move
15949 //   peepconstraint ( 0.dst == 1.dst );
15950 //   // construct a replacement instruction that sets
15951 //   // the destination to ( move's source register + one )
15952 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
15953 // %}
15954 //
15955 
15956 // Implementation no longer uses movX instructions since
15957 // machine-independent system no longer uses CopyX nodes.
15958 //
15959 // peephole
15960 // %{
15961 //   peepmatch (incI_iReg movI);
15962 //   peepconstraint (0.dst == 1.dst);
15963 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15964 // %}
15965 
15966 // peephole
15967 // %{
15968 //   peepmatch (decI_iReg movI);
15969 //   peepconstraint (0.dst == 1.dst);
15970 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15971 // %}
15972 
15973 // peephole
15974 // %{
15975 //   peepmatch (addI_iReg_imm movI);
15976 //   peepconstraint (0.dst == 1.dst);
15977 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15978 // %}
15979 
15980 // peephole
15981 // %{
15982 //   peepmatch (incL_iReg movL);
15983 //   peepconstraint (0.dst == 1.dst);
15984 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15985 // %}
15986 
15987 // peephole
15988 // %{
15989 //   peepmatch (decL_iReg movL);
15990 //   peepconstraint (0.dst == 1.dst);
15991 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15992 // %}
15993 
15994 // peephole
15995 // %{
15996 //   peepmatch (addL_iReg_imm movL);
15997 //   peepconstraint (0.dst == 1.dst);
15998 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15999 // %}
16000 
16001 // peephole
16002 // %{
16003 //   peepmatch (addP_iReg_imm movP);
16004 //   peepconstraint (0.dst == 1.dst);
16005 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16006 // %}
16007 
16008 // // Change load of spilled value to only a spill
16009 // instruct storeI(memory mem, iRegI src)
16010 // %{
16011 //   match(Set mem (StoreI mem src));
16012 // %}
16013 //
16014 // instruct loadI(iRegINoSp dst, memory mem)
16015 // %{
16016 //   match(Set dst (LoadI mem));
16017 // %}
16018 //
16019 
16020 //----------SMARTSPILL RULES---------------------------------------------------
16021 // These must follow all instruction definitions as they use the names
16022 // defined in the instructions definitions.
16023 
16024 // Local Variables:
16025 // mode: c++
16026 // End: